code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase_ : str =1 @register_to_config def __init__( self ,A__=2_0_0_0 ,A__=0.1 ,A__=2_0 ,A__=1E-3): lowercase = None lowercase = None lowercase = None def A__ ( self ,A__ ,A__ = None): lowercase = torch.linspace(1 ,self.config.sampling_eps ,A__ ,device=A__) def A__ ( self ,A__ ,A__ ,A__ ,A__=None): if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''') # TODO(Patrick) better comments + non-PyTorch # postprocess model score lowercase = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) lowercase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) lowercase = std.flatten() while len(std.shape) < len(score.shape): lowercase = std.unsqueeze(-1) lowercase = -score / std # compute lowercase = -1.0 / len(self.timesteps) lowercase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) lowercase = beta_t.flatten() while len(beta_t.shape) < len(x.shape): lowercase = beta_t.unsqueeze(-1) lowercase = -0.5 * beta_t * x lowercase = torch.sqrt(A__) lowercase = drift - diffusion**2 * score lowercase = x + drift * dt # add noise lowercase = randn_tensor(x.shape ,layout=x.layout ,generator=A__ ,device=x.device ,dtype=x.dtype) lowercase = x_mean + diffusion * math.sqrt(-dt) * noise return x, x_mean def __len__( self): return self.config.num_train_timesteps
101
"""simple docstring""" import pprint import requests _lowercase : Optional[Any] = 'https://zenquotes.io/api' def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": _lowercase : int = random_quotes() pprint.pprint(response)
332
0
"""simple docstring""" import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE : List[str] = getLogger(__name__) def lowercase ( _snake_case : Tuple , _snake_case : str , _snake_case : str , _snake_case : int = 8 , _snake_case : int = 1_024 , _snake_case : Any="val" , _snake_case : Tuple=None , _snake_case : Any=False , _snake_case : str="summarization" , _snake_case : Dict=None , _snake_case : Optional[Any]=1 , _snake_case : Dict = None , _snake_case : List[Any]="" , **_snake_case : int , ) ->Dict: """simple docstring""" __snake_case : int = str(_snake_case ) assert local_rank is not None torch.distributed.init_process_group(backend='''nccl''' , rank=_snake_case ) __snake_case : Optional[Any] = Path(_snake_case ) __snake_case : str = save_dir.joinpath(f"""rank_{local_rank}_output.json""" ) torch.cuda.set_device(_snake_case ) __snake_case : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).cuda() if fpaa: __snake_case : List[str] = model.half() # determine if we need to increase num_beams use_task_specific_params(_snake_case , _snake_case ) # update config with task specific params __snake_case : Dict = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: __snake_case : Optional[Any] = num_return_sequences __snake_case : Dict = AutoTokenizer.from_pretrained(_snake_case ) logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. if max_source_length is None: __snake_case : List[str] = tokenizer.model_max_length if prefix is None: __snake_case : List[str] = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' __snake_case : List[str] = SeqaSeqDataset( _snake_case , _snake_case , _snake_case , max_target_length=1_024 , type_path=_snake_case , n_obs=_snake_case , prefix=_snake_case , **_snake_case , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. __snake_case : Union[str, Any] = ds.make_sortish_sampler(_snake_case , distributed=_snake_case , add_extra_examples=_snake_case , shuffle=_snake_case ) __snake_case : List[Any] = DataLoader(_snake_case , sampler=_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn ) __snake_case : Union[str, Any] = [] for batch in tqdm(_snake_case ): __snake_case : Tuple = model.generate( input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=_snake_case , num_beams=_snake_case , **_snake_case , ) __snake_case : List[Any] = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) __snake_case : List[str] = batch['''ids'''] if num_return_sequences > 1: __snake_case : Dict = chunks(_snake_case , _snake_case ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(_snake_case ): results.append({'''pred''': pred, '''id''': ids[i].item()} ) save_json(_snake_case , _snake_case ) return results, sampler.num_replicas def lowercase ( ) ->int: """simple docstring""" __snake_case : Any = argparse.ArgumentParser( epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' ) parser.add_argument('''--data_dir''' , type=_snake_case , help='''like cnn_dm/test.source''' ) parser.add_argument( '''--model_name''' , type=_snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , ) parser.add_argument('''--save_dir''' , type=_snake_case , help='''where to save''' , default='''tmp_gen''' ) parser.add_argument('''--max_source_length''' , type=_snake_case , default=_snake_case ) parser.add_argument( '''--type_path''' , type=_snake_case , default='''test''' , help='''which subset to evaluate typically train/val/test''' ) parser.add_argument('''--task''' , type=_snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=_snake_case , default=8 , required=_snake_case , help='''batch size''' ) parser.add_argument( '''--local_rank''' , type=_snake_case , default=-1 , required=_snake_case , help='''should be passed by distributed.launch''' ) parser.add_argument( '''--n_obs''' , type=_snake_case , default=_snake_case , required=_snake_case , help='''How many observations. Defaults to all.''' ) parser.add_argument( '''--num_return_sequences''' , type=_snake_case , default=1 , required=_snake_case , help='''How many sequences to return''' ) parser.add_argument( '''--sync_timeout''' , type=_snake_case , default=600 , required=_snake_case , help='''How long should master process wait for other processes to finish.''' , ) parser.add_argument('''--src_lang''' , type=_snake_case , default=_snake_case , required=_snake_case ) parser.add_argument('''--tgt_lang''' , type=_snake_case , default=_snake_case , required=_snake_case ) parser.add_argument( '''--prefix''' , type=_snake_case , required=_snake_case , default=_snake_case , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--debug''' , action='''store_true''' ) __snake_case : str = time.time() __snake_case , __snake_case : Any = parser.parse_known_args() __snake_case : List[Any] = parse_numeric_n_bool_cl_kwargs(_snake_case ) if generate_kwargs and args.local_rank <= 0: print(f"""parsed the following generate kwargs: {generate_kwargs}""" ) __snake_case : List[Any] = Path(args.save_dir + '''_tmp''' ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) # this handles locking. __snake_case : Optional[int] = list(json_save_dir.glob('''rank_*.json''' ) ) if intermediate_files: raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. __snake_case : Dict = {} if args.src_lang is not None: __snake_case : Dict = args.src_lang if args.tgt_lang is not None: __snake_case : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=_snake_case ) __snake_case , __snake_case : List[Any] = eval_data_dir( args.data_dir , _snake_case , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_snake_case , **_snake_case , ) if args.local_rank <= 0: __snake_case : int = Path(args.save_dir ) save_dir.mkdir(exist_ok=_snake_case ) __snake_case : Optional[Any] = gather_results_from_each_node(_snake_case , _snake_case , args.sync_timeout ) __snake_case : str = combine_partial_results(_snake_case ) if args.num_return_sequences > 1: __snake_case : List[Any] = save_dir.joinpath('''pseudolabel_results.json''' ) print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" ) save_json(_snake_case , _snake_case ) return __snake_case : Tuple = Path(args.data_dir ).joinpath(args.type_path + '''.target''' ) with open(_snake_case ) as f: __snake_case : Optional[Any] = [x.rstrip() for x in f.readlines()][: len(_snake_case )] # Calculate metrics, save metrics, and save _generations.txt __snake_case : List[str] = '''translation''' in args.task __snake_case : List[Any] = calculate_bleu if calc_bleu else calculate_rouge __snake_case : Dict = '''bleu''' if calc_bleu else '''rouge''' __snake_case : Dict = score_fn(_snake_case , _snake_case ) __snake_case : int = len(_snake_case ) __snake_case : Dict = time.time() - start_time __snake_case : Optional[Any] = round(runtime / metrics['''n_obs'''] , 4 ) __snake_case : List[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics __snake_case : int = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" ) save_json(_snake_case , _snake_case , indent=_snake_case ) print(_snake_case ) write_txt_file(_snake_case , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) ) if args.debug: write_txt_file(_snake_case , save_dir.joinpath(f"""{args.type_path}.target""" ) ) else: shutil.rmtree(_snake_case ) def lowercase ( _snake_case : Union[str, Any] ) ->List: """simple docstring""" __snake_case : List[Any] = [] for partial_result in partial_results: records.extend(_snake_case ) __snake_case : List[str] = sorted(_snake_case , key=lambda _snake_case : x["id"] ) __snake_case : Tuple = [x['''pred'''] for x in records] return preds def lowercase ( _snake_case : int , _snake_case : List[str] , _snake_case : List[Any] ) ->List[Dict[str, List]]: """simple docstring""" __snake_case : List[str] = time.time() logger.info('''waiting for all nodes to finish''' ) __snake_case : List[str] = None while (time.time() - start_wait) < timeout: __snake_case : Any = list(save_dir.glob('''rank_*.json''' ) ) if len(_snake_case ) < num_replicas: continue try: # make sure all json files are fully saved __snake_case : Tuple = lmap(_snake_case , _snake_case ) return json_data except JSONDecodeError: continue else: raise TimeoutError('''Rank 0 gave up on waiting for other processes''' ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
102
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[str] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :Union[tf.Tensor, np.ndarray] ): if isinstance(snake_case_ , np.ndarray ): return list(tensor.shape ) __UpperCAmelCase = tf.shape(snake_case_ ) if tensor.shape == tf.TensorShape(snake_case_ ): return dynamic __UpperCAmelCase = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(snake_case_ )] def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :Optional[int] = None , snake_case_ :Optional[str] = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=snake_case_ , name=snake_case_ ) def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :Union[str, Any]=1E-5 , snake_case_ :List[str]=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case_ , snake_case_ ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized __UpperCAmelCase , __UpperCAmelCase = tf.nn.moments(snake_case_ , axes=[axis] , keepdims=snake_case_ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __UpperCAmelCase = [1] * inputs.shape.rank __UpperCAmelCase = shape_list(snake_case_ )[axis] __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) # Compute layer normalization using the batch_normalization # function. __UpperCAmelCase = tf.nn.batch_normalization( snake_case_ , snake_case_ , snake_case_ , offset=snake_case_ , scale=snake_case_ , variance_epsilon=snake_case_ , ) return outputs def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :List[str]=0 , snake_case_ :Optional[Any]=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __UpperCAmelCase = tf.shape(snake_case_ ) __UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :tf.Tensor ): if not isinstance(snake_case_ , tf.Tensor ): __UpperCAmelCase = tf.convert_to_tensor(snake_case_ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __UpperCAmelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __UpperCAmelCase = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __UpperCAmelCase = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :int , snake_case_ :str = "input_ids" ): tf.debugging.assert_less( snake_case_ , tf.cast(snake_case_ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case_ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any] , snake_case_ :List[str] ): __UpperCAmelCase = 64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __UpperCAmelCase = [x for x in data if len(snake_case_ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) __UpperCAmelCase = np.asarray(snake_case_ ) __UpperCAmelCase = 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(snake_case_ ): __UpperCAmelCase = chunk_data else: __UpperCAmelCase = data def lowercase__ ( snake_case_ :str , snake_case_ :List[str] ): if name in group.attrs: __UpperCAmelCase = [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs[name]] else: __UpperCAmelCase = [] __UpperCAmelCase = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase__ ( snake_case_ :Tuple ): def _expand_single_ad_tensor(snake_case_ :Optional[int] ): if isinstance(snake_case_ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(snake_case_ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , snake_case_ )
332
0
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def UpperCamelCase( __UpperCamelCase : int ): # A local function to see if a dot lands in the circle. def is_in_circle(__UpperCamelCase : float ,__UpperCamelCase : float ) -> bool: lowerCAmelCase_ : int = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle lowerCAmelCase_ : Optional[int] = mean( int(is_in_circle(uniform(-1.0 ,1.0 ) ,uniform(-1.0 ,1.0 ) ) ) for _ in range(__UpperCamelCase ) ) # The ratio of the area for circle to square is pi/4. lowerCAmelCase_ : str = proportion * 4 print(f"""The estimated value of pi is {pi_estimate}""" ) print(f"""The numpy value of pi is {pi}""" ) print(f"""The total error is {abs(pi - pi_estimate )}""" ) def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : Callable[[float], float] ,__UpperCamelCase : float = 0.0 ,__UpperCamelCase : float = 1.0 ,): return mean( function_to_integrate(uniform(__UpperCamelCase ,__UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value) def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : float = 0.0 ,__UpperCamelCase : float = 1.0 ): def identity_function(__UpperCamelCase : float ) -> float: return x lowerCAmelCase_ : Optional[int] = area_under_curve_estimator( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowerCAmelCase_ : int = (max_value * max_value - min_value * min_value) / 2 print('''******************''' ) print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {expected_value}""" ) print(f"""Total error is {abs(estimated_value - expected_value )}""" ) print('''******************''' ) def UpperCamelCase( __UpperCamelCase : int ): def function_to_integrate(__UpperCamelCase : float ) -> float: return sqrt(4.0 - x * x ) lowerCAmelCase_ : Optional[int] = area_under_curve_estimator( __UpperCamelCase ,__UpperCamelCase ,0.0 ,2.0 ) print('''******************''' ) print('''Estimating pi using area_under_curve_estimator''' ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {pi}""" ) print(f"""Total error is {abs(estimated_value - pi )}""" ) print('''******************''' ) if __name__ == "__main__": import doctest doctest.testmod()
103
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowercase__ ( snake_case_ :Union[str, Any]=None ): if subparsers is not None: __UpperCAmelCase = subparsers.add_parser('''env''' ) else: __UpperCAmelCase = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=snake_case_ , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=snake_case_ ) return parser def lowercase__ ( snake_case_ :List[Any] ): __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = is_xpu_available() __UpperCAmelCase = is_npu_available() __UpperCAmelCase = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(snake_case_ ): __UpperCAmelCase = load_config_from_file(args.config_file ).to_dict() __UpperCAmelCase = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''PyTorch XPU available''': str(snake_case_ ), '''PyTorch NPU available''': str(snake_case_ ), '''System RAM''': F'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''', } if pt_cuda_available: __UpperCAmelCase = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(snake_case_ , snake_case_ ) else F'''\t{accelerate_config}''' ) print(snake_case_ ) __UpperCAmelCase = accelerate_config return info def lowercase__ ( ): __UpperCAmelCase = env_command_parser() __UpperCAmelCase = parser.parse_args() env_command(snake_case_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
332
0
'''simple docstring''' import datasets lowerCAmelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCAmelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCAmelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def _A ( A__ , A__ ): """simple docstring""" return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ (datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), } ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ): return {"accuracy": simple_accuracy(lowercase__ ,lowercase__ )}
104
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 25_00_04 _lowercase : int = 25_00_20 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : Union[str, Any] = MBartaaTokenizer a__ : List[str] = MBartaaTokenizerFast a__ : Any = True a__ : List[str] = True def a ( self : str ): super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self : Dict ): __UpperCAmelCase = '''<s>''' __UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowercase ) , 10_54 ) def a ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def a ( self : str ): __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) __UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) __UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def a ( self : str ): # fmt: off __UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def a ( self : str ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=True __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=False __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): a__ : str = "facebook/mbart-large-50-one-to-many-mmt" a__ : Union[str, Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] a__ : Any = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] a__ : Any = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def a ( cls : Tuple ): __UpperCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) __UpperCAmelCase = 1 return cls def a ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) def a ( self : Optional[Any] ): self.assertIn(_lowercase , self.tokenizer.all_special_ids ) __UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) __UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase ) self.assertEqual(_lowercase , _lowercase ) self.assertNotIn(self.tokenizer.eos_token , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , _lowercase ) __UpperCAmelCase = 10 __UpperCAmelCase = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0] self.assertEqual(ids[0] , _lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_lowercase ) , _lowercase ) def a ( self : Optional[int] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowercase ) __UpperCAmelCase = MBartaaTokenizer.from_pretrained(_lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors='''pt''' ) __UpperCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors='''pt''' ) __UpperCAmelCase = targets['''input_ids'''] __UpperCAmelCase = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(_lowercase ) , { # en_XX, A, test, EOS '''input_ids''': [[25_00_04, 62, 30_34, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
332
0
"""simple docstring""" from __future__ import annotations class __UpperCamelCase : def __init__( self , lowerCAmelCase__ ) -> None: a : str = order # a_{0} ... a_{k} a : int = [1.0] + [0.0] * order # b_{0} ... b_{k} a : Union[str, Any] = [1.0] + [0.0] * order # x[n-1] ... x[n-k] a : Any = [0.0] * self.order # y[n-1] ... y[n-k] a : Tuple = [0.0] * self.order def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None: if len(lowerCAmelCase__ ) < self.order: a : Dict = [1.0, *a_coeffs] if len(lowerCAmelCase__ ) != self.order + 1: a : Optional[int] = ( f"""Expected a_coeffs to have {self.order + 1} elements """ f"""for {self.order}-order filter, got {len(lowerCAmelCase__ )}""" ) raise ValueError(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) != self.order + 1: a : Optional[Any] = ( f"""Expected b_coeffs to have {self.order + 1} elements """ f"""for {self.order}-order filter, got {len(lowerCAmelCase__ )}""" ) raise ValueError(lowerCAmelCase__ ) a : Any = a_coeffs a : int = b_coeffs def __a ( self , lowerCAmelCase__ ) -> float: a : List[str] = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) a : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] a : Optional[Any] = self.input_history[:-1] a : str = self.output_history[:-1] a : int = sample a : List[Any] = result return result
105
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase__ ( ): raise RuntimeError('''CUDA out of memory.''' ) class _UpperCAmelCase ( nn.Module ): def __init__( self : Optional[Any] ): super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def a ( self : Optional[int] , _lowercase : Optional[Any] ): return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) ) class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[int] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) def a ( self : Optional[int] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : str , _lowercase : List[str] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def a ( self : Tuple ): @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(_lowercase : Optional[int] ): pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : List[Any] ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : List[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : Union[str, Any] ): @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : str ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_lowercase ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def a ( self : Dict ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : int ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def a ( self : str ): __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _lowercase ) __UpperCAmelCase = release_memory(_lowercase ) self.assertEqual(torch.cuda.memory_allocated() , _lowercase )
332
0
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
106
"""simple docstring""" import argparse import copy def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = {} with open(snake_case_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[1], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[0], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ): with open(snake_case_ ) as f: __UpperCAmelCase = f.read(1 ) __UpperCAmelCase = start_node __UpperCAmelCase = [] __UpperCAmelCase = start_node __UpperCAmelCase = 0 while visiting not in first_solution: __UpperCAmelCase = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution: __UpperCAmelCase = k[1] __UpperCAmelCase = k[0] first_solution.append(snake_case_ ) __UpperCAmelCase = distance_of_first_solution + int(snake_case_ ) __UpperCAmelCase = best_node first_solution.append(snake_case_ ) __UpperCAmelCase = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __UpperCAmelCase = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ): __UpperCAmelCase = [] for n in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) for kn in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) if n == kn: continue __UpperCAmelCase = copy.deepcopy(snake_case_ ) __UpperCAmelCase = kn __UpperCAmelCase = n __UpperCAmelCase = 0 for k in _tmp[:-1]: __UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __UpperCAmelCase = distance + int(i[1] ) _tmp.append(snake_case_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ): __UpperCAmelCase = 1 __UpperCAmelCase = first_solution __UpperCAmelCase = [] __UpperCAmelCase = distance_of_first_solution __UpperCAmelCase = solution while count <= iters: __UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = neighborhood[index_of_best_solution] __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = False while not found: __UpperCAmelCase = 0 while i < len(snake_case_ ): if best_solution[i] != solution[i]: __UpperCAmelCase = best_solution[i] __UpperCAmelCase = solution[i] break __UpperCAmelCase = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __UpperCAmelCase = True __UpperCAmelCase = best_solution[:-1] __UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __UpperCAmelCase = cost __UpperCAmelCase = solution else: __UpperCAmelCase = index_of_best_solution + 1 __UpperCAmelCase = neighborhood[index_of_best_solution] if len(snake_case_ ) >= size: tabu_list.pop(0 ) __UpperCAmelCase = count + 1 return best_solution_ever, best_cost def lowercase__ ( snake_case_ :str=None ): __UpperCAmelCase = generate_neighbours(args.File ) __UpperCAmelCase , __UpperCAmelCase = generate_first_solution( args.File , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = tabu_search( snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
332
0
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version __lowerCAmelCase : Union[str, Any] = { '<': operator.lt, '<=': operator.le, '==': operator.eq, '!=': operator.ne, '>=': operator.ge, '>': operator.gt, } def __magic_name__ ( A : List[Any], A : List[Any], A : str, A : List[str], A : Optional[Any], A : Union[str, Any] ): '''simple docstring''' if got_ver is None or want_ver is None: raise ValueError( F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider""" F""" reinstalling {pkg}.""" ) if not ops[op](version.parse(A ), version.parse(A ) ): raise ImportError( F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" ) def __magic_name__ ( A : str, A : Optional[str] = None ): '''simple docstring''' a = F"""\n{hint}""" if hint is not None else "" # non-versioned check if re.match(R"^[\w_\-\d]+$", A ): a , a , a = requirement, None, None else: a = re.findall(R"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", A ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" F""" got {requirement}""" ) a , a = match[0] a = want_full.split("," ) # there could be multiple requirements a = {} for w in want_range: a = re.findall(R"^([\s!=<>]{1,2})(.+)", A ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," F""" but got {requirement}""" ) a , a = match[0] a = want_ver if op not in ops: raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" ) # special case if pkg == "python": a = ".".join([str(A ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(A, A, A, A, A, A ) return # check if any version is installed try: a = importlib.metadata.version(A ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(A, A, A, A, A, A ) def __magic_name__ ( A : Optional[int] ): '''simple docstring''' a = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" return require_version(A, A )
107
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowercase__ ( snake_case_ :ndarray ): return np.dot(snake_case_ , snake_case_ ) class _UpperCAmelCase : def __init__( self : Union[str, Any] , *, _lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ): __UpperCAmelCase = regularization __UpperCAmelCase = gamma if kernel == "linear": __UpperCAmelCase = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('''gamma must be float or int''' ) if not self.gamma > 0: raise ValueError('''gamma must be > 0''' ) __UpperCAmelCase = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: __UpperCAmelCase = F'''Unknown kernel: {kernel}''' raise ValueError(_lowercase ) def a ( self : Dict , _lowercase : ndarray , _lowercase : ndarray ): return np.dot(_lowercase , _lowercase ) def a ( self : Any , _lowercase : ndarray , _lowercase : ndarray ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def a ( self : Union[str, Any] , _lowercase : list[ndarray] , _lowercase : ndarray ): __UpperCAmelCase = observations __UpperCAmelCase = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((__UpperCAmelCase) , ) = np.shape(_lowercase ) def to_minimize(_lowercase : ndarray ) -> float: __UpperCAmelCase = 0 ((__UpperCAmelCase) , ) = np.shape(_lowercase ) for i in range(_lowercase ): for j in range(_lowercase ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(_lowercase ) __UpperCAmelCase = LinearConstraint(_lowercase , 0 , 0 ) __UpperCAmelCase = Bounds(0 , self.regularization ) __UpperCAmelCase = minimize( _lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x __UpperCAmelCase = l_star # calculating mean offset of separation plane to points __UpperCAmelCase = 0 for i in range(_lowercase ): for j in range(_lowercase ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) __UpperCAmelCase = s / n def a ( self : List[Any] , _lowercase : ndarray ): __UpperCAmelCase = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , _lowercase ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
332
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''', '''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''', '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Optional[int] ="longformer" def __init__( self , snake_case__ = 512 , snake_case__ = 2 , snake_case__ = 1 , snake_case__ = 0 , snake_case__ = 2 , snake_case__ = 30_522 , snake_case__ = 768 , snake_case__ = 12 , snake_case__ = 12 , snake_case__ = 3_072 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 512 , snake_case__ = 2 , snake_case__ = 0.02 , snake_case__ = 1e-12 , snake_case__ = False , **snake_case__ , ): """simple docstring""" super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowerCAmelCase : Union[str, Any] = attention_window lowerCAmelCase : List[Any] = sep_token_id lowerCAmelCase : Dict = bos_token_id lowerCAmelCase : int = eos_token_id lowerCAmelCase : Dict = vocab_size lowerCAmelCase : Dict = hidden_size lowerCAmelCase : Optional[Any] = num_hidden_layers lowerCAmelCase : Tuple = num_attention_heads lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : int = attention_probs_dropout_prob lowerCAmelCase : Tuple = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : Any = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : List[str] = onnx_export class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None ): """simple docstring""" super().__init__(snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase : Any = True @property def lowercase__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase : Tuple = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCAmelCase : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("global_attention_mask", dynamic_axis), ] ) @property def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Tuple = super().outputs if self.task == "default": lowerCAmelCase : Tuple = {0: "batch"} return outputs @property def lowercase__ ( self ): """simple docstring""" return 1e-4 @property def lowercase__ ( self ): """simple docstring""" return max(super().default_onnx_opset , 14 ) def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : Dict = super().generate_dummy_inputs( preprocessor=snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly lowerCAmelCase : Dict = torch.zeros_like(inputs["input_ids"] ) # make every second token global lowerCAmelCase : List[Any] = 1 return inputs
108
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowercase : int = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
332
0
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin A: str = logging.get_logger(__name__) enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = UNetaDModel __lowerCAmelCase : Tuple = 'sample' @property def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Optional[int] = 4 UpperCAmelCase : Dict = 3 UpperCAmelCase : Optional[Any] = (32, 32) UpperCAmelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = torch.tensor([10] ).to(_SCREAMING_SNAKE_CASE ) return {"sample": noise, "timestep": time_step} @property def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' return (3, 32, 32) @property def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : int = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } UpperCAmelCase : List[str] = self.dummy_input return init_dict, inputs_dict class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : int = UNetaDModel __lowerCAmelCase : List[str] = 'sample' @property def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : str = 4 UpperCAmelCase : str = 4 UpperCAmelCase : Any = (32, 32) UpperCAmelCase : int = floats_tensor((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = torch.tensor([10] ).to(_SCREAMING_SNAKE_CASE ) return {"sample": noise, "timestep": time_step} @property def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' return (4, 32, 32) @property def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' return (4, 32, 32) def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : int = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } UpperCAmelCase : Tuple = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Dict = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_SCREAMING_SNAKE_CASE ) model_accelerate.to(_SCREAMING_SNAKE_CASE ) model_accelerate.eval() UpperCAmelCase : Union[str, Any] = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase : Dict = noise.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = torch.tensor([10] * noise.shape[0] ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = model_accelerate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() UpperCAmelCase , UpperCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=_SCREAMING_SNAKE_CASE , low_cpu_mem_usage=_SCREAMING_SNAKE_CASE ) model_normal_load.to(_SCREAMING_SNAKE_CASE ) model_normal_load.eval() UpperCAmelCase : int = model_normal_load(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""] assert torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-3 ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : int = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase : Optional[Any] = noise.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample UpperCAmelCase : Tuple = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCAmelCase : List[Any] = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] ) # fmt: on self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-3 ) ) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : List[str] = UNetaDModel __lowerCAmelCase : List[str] = 'sample' @property def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=(32, 32) ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] = 4 UpperCAmelCase : Tuple = 3 UpperCAmelCase : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_SCREAMING_SNAKE_CASE ) return {"sample": noise, "timestep": time_step} @property def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' return (3, 32, 32) @property def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' return (3, 32, 32) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : int = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1E-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } UpperCAmelCase : Optional[Any] = self.dummy_input return init_dict, inputs_dict @slow def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = self.dummy_input UpperCAmelCase : int = floats_tensor((4, 3) + (256, 256) ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = noise UpperCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE ) assert image is not None, "Make sure output is not None" @slow def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : int = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = 4 UpperCAmelCase : Optional[Any] = 3 UpperCAmelCase : List[Any] = (256, 256) UpperCAmelCase : int = torch.ones((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = torch.tensor(batch_size * [1E-4] ).to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample UpperCAmelCase : str = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase : Optional[int] = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] ) # fmt: on self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-2 ) ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : int = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = 4 UpperCAmelCase : Optional[int] = 3 UpperCAmelCase : Optional[Any] = (32, 32) UpperCAmelCase : List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = torch.tensor(batch_size * [1E-4] ).to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample UpperCAmelCase : Any = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase : int = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] ) # fmt: on self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-2 ) ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' pass
109
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self : Tuple , _lowercase : str , _lowercase : str ): __UpperCAmelCase , __UpperCAmelCase = text, pattern __UpperCAmelCase , __UpperCAmelCase = len(_lowercase ), len(_lowercase ) def a ( self : Optional[int] , _lowercase : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def a ( self : int , _lowercase : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def a ( self : Optional[Any] ): # searches pattern in text and returns index positions __UpperCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): __UpperCAmelCase = self.mismatch_in_text(_lowercase ) if mismatch_index == -1: positions.append(_lowercase ) else: __UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) __UpperCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _lowercase : str = 'ABAABA' _lowercase : Tuple = 'AB' _lowercase : Dict = BoyerMooreSearch(text, pattern) _lowercase : Any = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
332
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCamelCase = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
275
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _UpperCAmelCase : a__ : int a__ : Node | None = None a__ : Node | None = None def lowercase__ ( ): __UpperCAmelCase = Node(1 ) __UpperCAmelCase = Node(2 ) __UpperCAmelCase = Node(3 ) __UpperCAmelCase = Node(4 ) __UpperCAmelCase = Node(5 ) return tree def lowercase__ ( snake_case_ :Node | None ): return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowercase__ ( snake_case_ :Node | None ): return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowercase__ ( snake_case_ :Node | None ): __UpperCAmelCase = [] if root is None: return output __UpperCAmelCase = deque([root] ) while process_queue: __UpperCAmelCase = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None ): if root is None: return [] __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = height(snake_case_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 1 else: output.append(get_nodes_from_right_to_left(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 0 return output def lowercase__ ( ): # Main function for testing. __UpperCAmelCase = make_tree() print(F'''In-order Traversal: {inorder(snake_case_ )}''' ) print(F'''Pre-order Traversal: {preorder(snake_case_ )}''' ) print(F'''Post-order Traversal: {postorder(snake_case_ )}''' , '''\n''' ) print(F'''Height of Tree: {height(snake_case_ )}''' , '''\n''' ) print('''Complete Level Order Traversal: ''' ) print(level_order(snake_case_ ) , '''\n''' ) print('''Level-wise order Traversal: ''' ) for level in range(1 , height(snake_case_ ) + 1 ): print(F'''Level {level}:''' , get_nodes_from_left_to_right(snake_case_ , level=snake_case_ ) ) print('''\nZigZag order Traversal: ''' ) print(zigzag(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
332
0
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process a_ = logging.getLogger(__name__) def _a( UpperCamelCase__ : int, UpperCamelCase__ : Optional[int] ): '''simple docstring''' return (preds == labels).mean() @dataclass class __SCREAMING_SNAKE_CASE : snake_case_ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) snake_case_ = field( default=_lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) snake_case_ = field( default=_lowerCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) snake_case_ = field( default=_lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __SCREAMING_SNAKE_CASE : snake_case_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) snake_case_ = field(metadata={"""help""": """Should contain the data files for the task."""} ) snake_case_ = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) snake_case_ = field( default=_lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''', snake_case_ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE__ : Dict =processors[data_args.task_name]() SCREAMING_SNAKE_CASE__ : Any =processor.get_labels() SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(snake_case_ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE__ : str =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=snake_case_, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) SCREAMING_SNAKE_CASE__ : Dict =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) SCREAMING_SNAKE_CASE__ : int =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=snake_case_, cache_dir=model_args.cache_dir, ) # Get datasets SCREAMING_SNAKE_CASE__ : Any =( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=snake_case_, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE__ : Dict =( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=snake_case_, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =np.argmax(p.predictions, axis=1 ) return {"acc": simple_accuracy(snake_case_, p.label_ids )} # Data collator SCREAMING_SNAKE_CASE__ : int =DataCollatorWithPadding(snake_case_, pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE__ : Optional[int] =Trainer( model=snake_case_, args=snake_case_, train_dataset=snake_case_, eval_dataset=snake_case_, compute_metrics=snake_case_, data_collator=snake_case_, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE__ : Any ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) SCREAMING_SNAKE_CASE__ : List[Any] =trainer.evaluate() SCREAMING_SNAKE_CASE__ : Tuple =os.path.join(training_args.output_dir, '''eval_results.txt''' ) if trainer.is_world_master(): with open(snake_case_, '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''', snake_case_, snake_case_ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(snake_case_ ) return results def _a( UpperCamelCase__ : Optional[int] ): '''simple docstring''' main() if __name__ == "__main__": main()
152
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class _UpperCAmelCase ( unittest.TestCase ): @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) ) @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) )
332
0
"""simple docstring""" def _A (__a , __a , __a ) -> Dict: """simple docstring""" if principal <= 0: raise Exception('''Principal borrowed must be > 0''' ) if rate_per_annum < 0: raise Exception('''Rate of interest must be >= 0''' ) if years_to_repay <= 0 or not isinstance(snake_case_ , snake_case_ ): raise Exception('''Years to repay must be an integer > 0''' ) # Yearly rate is divided by 12 to get monthly rate SCREAMING_SNAKE_CASE_ : Optional[Any] = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly SCREAMING_SNAKE_CASE_ : Optional[int] = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" def lowercase__ ( snake_case_ :Union[str, Any] ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = max(snake_case_ ) __UpperCAmelCase = min(snake_case_ ) # create the counting array __UpperCAmelCase = coll_max + 1 - coll_min __UpperCAmelCase = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , snake_case_ ): __UpperCAmelCase = counting_arr[i] + counting_arr[i - 1] # create the output collection __UpperCAmelCase = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , snake_case_ ) ): __UpperCAmelCase = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowercase__ ( snake_case_ :str ): return "".join([chr(snake_case_ ) for i in counting_sort([ord(snake_case_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt" _lowercase : int = input('Enter numbers separated by a comma:\n').strip() _lowercase : int = [int(item) for item in user_input.split(',')] print(counting_sort(unsorted))
332
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class A__ ( _lowerCAmelCase): A_ : torch.FloatTensor A_ : torch.FloatTensor A_ : Optional[torch.FloatTensor] = None class A__ ( _lowerCAmelCase , _lowerCAmelCase): A_ : int = 2 @register_to_config def __init__( self , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 1.007 , _SCREAMING_SNAKE_CASE = 80 , _SCREAMING_SNAKE_CASE = 0.05 , _SCREAMING_SNAKE_CASE = 50 , ): # standard deviation of the initial noise distribution __lowerCAmelCase : Optional[int] = sigma_max # setable values __lowerCAmelCase : Any = None __lowerCAmelCase : List[Any] = None __lowerCAmelCase : int = None # sigma(t_i) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): return sample def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): __lowerCAmelCase : Optional[int] = num_inference_steps __lowerCAmelCase : Optional[int] = np.arange(0 , self.num_inference_steps )[::-1].copy() __lowerCAmelCase : Optional[int] = torch.from_numpy(_lowercase ).to(_lowercase ) __lowerCAmelCase : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __lowerCAmelCase : List[str] = torch.tensor(_lowercase , dtype=torch.floataa , device=_lowercase ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): if self.config.s_min <= sigma <= self.config.s_max: __lowerCAmelCase : Union[str, Any] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: __lowerCAmelCase : Tuple = 0 # sample eps ~ N(0, S_noise^2 * I) __lowerCAmelCase : Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=_lowercase ).to(sample.device ) __lowerCAmelCase : Tuple = sigma + gamma * sigma __lowerCAmelCase : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ): __lowerCAmelCase : List[Any] = sample_hat + sigma_hat * model_output __lowerCAmelCase : Any = (sample_hat - pred_original_sample) / sigma_hat __lowerCAmelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=_lowercase , derivative=_lowercase , pred_original_sample=_lowercase ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ): __lowerCAmelCase : Any = sample_prev + sigma_prev * model_output __lowerCAmelCase : int = (sample_prev - pred_original_sample) / sigma_prev __lowerCAmelCase : List[str] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=_lowercase , derivative=_lowercase , pred_original_sample=_lowercase ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise NotImplementedError()
86
"""simple docstring""" from collections import defaultdict def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = first_str.lower().strip() __UpperCAmelCase = second_str.lower().strip() # Remove whitespace __UpperCAmelCase = first_str.replace(''' ''' , '''''' ) __UpperCAmelCase = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(snake_case_ ) != len(snake_case_ ): return False # Default values for count should be 0 __UpperCAmelCase = defaultdict(snake_case_ ) # For each character in input strings, # increment count in the corresponding for i in range(len(snake_case_ ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() _lowercase : List[Any] = input('Enter the first string ').strip() _lowercase : Tuple = input('Enter the second string ').strip() _lowercase : str = check_anagrams(input_a, input_b) print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
332
0
from __future__ import annotations def lowerCAmelCase_ ( __A ) -> Optional[Any]: '''simple docstring''' create_state_space_tree(snake_case_, [], 0, [0 for i in range(len(snake_case_ ) )] ) def lowerCAmelCase_ ( __A, __A, __A, __A, ) -> Any: '''simple docstring''' if index == len(snake_case_ ): print(snake_case_ ) return for i in range(len(snake_case_ ) ): if not index_used[i]: current_sequence.append(sequence[i] ) UpperCAmelCase__ = True create_state_space_tree(snake_case_, snake_case_, index + 1, snake_case_ ) current_sequence.pop() UpperCAmelCase__ = False UpperCamelCase__ = [3, 1, 2, 4] generate_all_permutations(sequence) UpperCamelCase__ = ["A", "B", "C"] generate_all_permutations(sequence_a)
65
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict , _lowercase : Union[str, Any] ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __UpperCAmelCase = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowercase ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : List[str] ): __UpperCAmelCase = '''sgugger/tiny-distilbert-classification''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) # set architectures equal to `None` __UpperCAmelCase = None __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Tuple ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Any ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Union[str, Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() ) def a ( self : List[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowercase : str ): self.assertTrue(hasattr(_lowercase , '''sequential''' ) ) self.assertTrue(hasattr(_lowercase , '''cumulative''' ) ) self.assertTrue(hasattr(_lowercase , '''current''' ) ) self.assertTrue(hasattr(_lowercase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
332
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any]=13 , lowerCAmelCase : str=7 , lowerCAmelCase : int=True , lowerCAmelCase : str=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[int]=99 , lowerCAmelCase : str=32 , lowerCAmelCase : Dict=5 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=37 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : int=0.1 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : str=16 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=0.02 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Tuple=None , ): lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = self.vocab_size - 1 def __lowercase ( self : str ): lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __lowercase ( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , *lowerCAmelCase : Tuple ): lowerCAmelCase = OpenAIGPTModel(config=_lowercase ) model.to(_lowercase ) model.eval() lowerCAmelCase = model(_lowercase , token_type_ids=_lowercase , head_mask=_lowercase ) lowerCAmelCase = model(_lowercase , token_type_ids=_lowercase ) lowerCAmelCase = model(_lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase ( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , *lowerCAmelCase : List[Any] ): lowerCAmelCase = OpenAIGPTLMHeadModel(_lowercase ) model.to(_lowercase ) model.eval() lowerCAmelCase = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase ( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str , *lowerCAmelCase : Dict ): lowerCAmelCase = OpenAIGPTDoubleHeadsModel(_lowercase ) model.to(_lowercase ) model.eval() lowerCAmelCase = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase ( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , *lowerCAmelCase : Any ): lowerCAmelCase = self.num_labels lowerCAmelCase = OpenAIGPTForSequenceClassification(_lowercase ) model.to(_lowercase ) model.eval() lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase ( self : Union[str, Any] ): lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): _a = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _a = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _a = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __lowercase ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def __lowercase ( self : str , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int=False ): lowerCAmelCase = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase , ) lowerCAmelCase = inputs_dict["""labels"""] lowerCAmelCase = inputs_dict["""labels"""] lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_lowercase , ) lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowercase ) return inputs_dict def __lowercase ( self : Optional[Any] ): lowerCAmelCase = OpenAIGPTModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_lowercase , n_embd=37 ) def __lowercase ( self : Optional[int] ): self.config_tester.run_common_tests() def __lowercase ( self : int ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_lowercase ) def __lowercase ( self : Any ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_lowercase ) def __lowercase ( self : int ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_lowercase ) def __lowercase ( self : Optional[Any] ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_lowercase ) @slow def __lowercase ( self : Union[str, Any] ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = OpenAIGPTModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def __lowercase ( self : str ): lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_lowercase ) lowerCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_lowercase ) # the president is lowerCAmelCase = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCAmelCase = model.generate(_lowercase , do_sample=_lowercase ) self.assertListEqual(output_ids[0].tolist() , _lowercase )
155
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _UpperCAmelCase ( _lowerCAmelCase ): def a ( self : Tuple , _lowercase : Dict=None , _lowercase : str=None , _lowercase : Union[str, Any]=None , **_lowercase : Tuple ): if tokenize_kwargs is None: __UpperCAmelCase = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __UpperCAmelCase = truncation __UpperCAmelCase = tokenize_kwargs __UpperCAmelCase = {} if return_tensors is not None: __UpperCAmelCase = return_tensors return preprocess_params, {}, postprocess_params def a ( self : int , _lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): __UpperCAmelCase = self.framework __UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase ) return model_inputs def a ( self : List[str] , _lowercase : Tuple ): __UpperCAmelCase = self.model(**_lowercase ) return model_outputs def a ( self : int , _lowercase : Tuple , _lowercase : str=False ): # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[Any] , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): return super().__call__(*_lowercase , **_lowercase )
332
0
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset _UpperCamelCase : str = 'bert-base-cased' _UpperCamelCase : Any = 'google/pegasus-xsum' _UpperCamelCase : Any = [' Sam ate lunch today.', 'Sams lunch ingredients.'] _UpperCamelCase : List[str] = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] _UpperCamelCase : Any = 'patrickvonplaten/t5-tiny-random' _UpperCamelCase : str = 'sshleifer/bart-tiny-random' _UpperCamelCase : Dict = 'sshleifer/tiny-mbart' _UpperCamelCase : Dict = 'sshleifer/tiny-marian-en-de' def _SCREAMING_SNAKE_CASE ( __snake_case : Path , __snake_case : list ): '''simple docstring''' lowercase = '\n'.join(snake_case_ ) Path(snake_case_ ).open('w' ).writelines(snake_case_ ) def _SCREAMING_SNAKE_CASE ( __snake_case : Any ): '''simple docstring''' for split in ["train", "val", "test"]: _dump_articles(os.path.join(snake_case_ , f'{split}.source' ) , snake_case_ ) _dump_articles(os.path.join(snake_case_ , f'{split}.target' ) , snake_case_ ) return tmp_dir class a ( _lowerCAmelCase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def UpperCamelCase_ ( self , _lowerCamelCase ): lowercase = AutoTokenizer.from_pretrained(_lowercase ) lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowercase = max(len(tokenizer.encode(_lowercase ) ) for a in ARTICLES ) lowercase = max(len(tokenizer.encode(_lowercase ) ) for a in SUMMARIES ) lowercase = 4 lowercase = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowercase , lowercase = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. lowercase = SeqaSeqDataset( _lowercase , data_dir=_lowercase , type_path='train' , max_source_length=_lowercase , max_target_length=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , ) lowercase = DataLoader(_lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_lowercase , _lowercase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowercase = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def UpperCamelCase_ ( self , _lowerCamelCase ): lowercase = AutoTokenizer.from_pretrained(_lowercase ) lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowercase = max(len(tokenizer.encode(_lowercase ) ) for a in ARTICLES ) lowercase = max(len(tokenizer.encode(_lowercase ) ) for a in SUMMARIES ) lowercase = 4 lowercase = LegacySeqaSeqDataset( _lowercase , data_dir=_lowercase , type_path='train' , max_source_length=2_0 , max_target_length=_lowercase , ) lowercase = DataLoader(_lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def UpperCamelCase_ ( self ): lowercase = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowercase = tmp_dir.joinpath('train.source' ).open().readlines() lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_lowercase , _lowercase , 1_2_8 , _lowercase ) lowercase = {x.name for x in tmp_dir.iterdir()} lowercase = {x.name for x in save_dir.iterdir()} lowercase = save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_lowercase ) < len(_lowercase ) assert len(_lowercase ) == 1 assert len(packed_examples[0] ) == sum(len(_lowercase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' ) def UpperCamelCase_ ( self ): if not FAIRSEQ_AVAILABLE: return lowercase , lowercase , lowercase = self._get_dataset(max_len=6_4 ) lowercase = 6_4 lowercase = ds.make_dynamic_sampler(_lowercase , required_batch_size_multiple=_lowercase ) lowercase = [len(_lowercase ) for x in batch_sampler] assert len(set(_lowercase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_lowercase ) == len(_lowercase ) # no dropped or added examples lowercase = DataLoader(_lowercase , batch_sampler=_lowercase , collate_fn=ds.collate_fn , num_workers=2 ) lowercase = [] lowercase = [] for batch in data_loader: lowercase = batch['input_ids'].shape lowercase = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowercase = np.product(batch['input_ids'].shape ) num_src_per_batch.append(_lowercase ) if num_src_tokens > (max_tokens * 1.1): failures.append(_lowercase ) assert num_src_per_batch[0] == max(_lowercase ) if failures: raise AssertionError(F'too many tokens in {len(_lowercase )} batches' ) def UpperCamelCase_ ( self ): lowercase , lowercase , lowercase = self._get_dataset(max_len=5_1_2 ) lowercase = 2 lowercase = ds.make_sortish_sampler(_lowercase , shuffle=_lowercase ) lowercase = DataLoader(_lowercase , batch_size=_lowercase , collate_fn=ds.collate_fn , num_workers=2 ) lowercase = DataLoader(_lowercase , batch_size=_lowercase , collate_fn=ds.collate_fn , num_workers=2 , sampler=_lowercase ) lowercase = tokenizer.pad_token_id def count_pad_tokens(_lowerCamelCase , _lowerCamelCase="input_ids" ): return [batch[k].eq(_lowercase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_lowercase , k='labels' ) ) < sum(count_pad_tokens(_lowercase , k='labels' ) ) assert sum(count_pad_tokens(_lowercase ) ) < sum(count_pad_tokens(_lowercase ) ) assert len(_lowercase ) == len(_lowercase ) def UpperCamelCase_ ( self , _lowerCamelCase=1_0_0_0 , _lowerCamelCase=1_2_8 ): if os.getenv('USE_REAL_DATA' , _lowercase ): lowercase = 'examples/seq2seq/wmt_en_ro' lowercase = max_len * 2 * 6_4 if not Path(_lowercase ).joinpath('train.len' ).exists(): save_len_file(_lowercase , _lowercase ) else: lowercase = 'examples/seq2seq/test_data/wmt_en_ro' lowercase = max_len * 4 save_len_file(_lowercase , _lowercase ) lowercase = AutoTokenizer.from_pretrained(_lowercase ) lowercase = SeqaSeqDataset( _lowercase , data_dir=_lowercase , type_path='train' , max_source_length=_lowercase , max_target_length=_lowercase , n_obs=_lowercase , ) return ds, max_tokens, tokenizer def UpperCamelCase_ ( self ): lowercase , lowercase , lowercase = self._get_dataset() lowercase = set(DistributedSortishSampler(_lowercase , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=_lowercase ) ) lowercase = set(DistributedSortishSampler(_lowercase , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=_lowercase ) ) assert idsa.intersection(_lowercase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def UpperCamelCase_ ( self , _lowerCamelCase ): lowercase = AutoTokenizer.from_pretrained(_lowercase , use_fast=_lowercase ) if tok_name == MBART_TINY: lowercase = SeqaSeqDataset( _lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , ) lowercase = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowercase = SeqaSeqDataset( _lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , ) lowercase = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_lowercase ) == 1 if tok_name == BART_TINY else len(_lowercase ) == 0
220
"""simple docstring""" from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor _lowercase : Union[str, Any] = transforms.Compose( [ transforms.Resize((2_56, 2_56)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowercase__ ( snake_case_ :List[Any] ): if isinstance(snake_case_ , torch.Tensor ): return image elif isinstance(snake_case_ , PIL.Image.Image ): __UpperCAmelCase = [image] __UpperCAmelCase = [trans(img.convert('''RGB''' ) ) for img in image] __UpperCAmelCase = torch.stack(snake_case_ ) return image class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Any , _lowercase : str , _lowercase : str ): super().__init__() # make sure scheduler can always be converted to DDIM __UpperCAmelCase = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_lowercase , scheduler=_lowercase ) def a ( self : int , _lowercase : List[str] ): if strength < 0 or strength > 1: raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def a ( self : List[Any] , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : int ): # get the original timestep using init_timestep __UpperCAmelCase = min(int(num_inference_steps * strength ) , _lowercase ) __UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) __UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def a ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : Optional[int]=None ): if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}''' ) __UpperCAmelCase = image.to(device=_lowercase , dtype=_lowercase ) if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) __UpperCAmelCase = init_latents.shape __UpperCAmelCase = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase ) # get latents print('''add noise to latents at timestep''' , _lowercase ) __UpperCAmelCase = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = init_latents return latents @torch.no_grad() def __call__( self : Any , _lowercase : Union[torch.FloatTensor, PIL.Image.Image] = None , _lowercase : float = 0.8 , _lowercase : int = 1 , _lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase : float = 0.0 , _lowercase : int = 50 , _lowercase : Optional[bool] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , ): self.check_inputs(_lowercase ) # 2. Preprocess image __UpperCAmelCase = preprocess(_lowercase ) # 3. set timesteps self.scheduler.set_timesteps(_lowercase , device=self.device ) __UpperCAmelCase , __UpperCAmelCase = self.get_timesteps(_lowercase , _lowercase , self.device ) __UpperCAmelCase = timesteps[:1].repeat(_lowercase ) # 4. Prepare latent variables __UpperCAmelCase = self.prepare_latents(_lowercase , _lowercase , _lowercase , self.unet.dtype , self.device , _lowercase ) __UpperCAmelCase = latents # 5. Denoising loop for t in self.progress_bar(_lowercase ): # 1. predict noise model_output __UpperCAmelCase = self.unet(_lowercase , _lowercase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __UpperCAmelCase = self.scheduler.step( _lowercase , _lowercase , _lowercase , eta=_lowercase , use_clipped_model_output=_lowercase , generator=_lowercase , ).prev_sample __UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __UpperCAmelCase = self.numpy_to_pil(_lowercase ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=_lowercase )
332
0
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class __a ( unittest.TestCase ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=4 , ) -> int: '''simple docstring''' lowercase__: List[str] = parent lowercase__: List[Any] = batch_size lowercase__: Optional[int] = seq_length lowercase__: Optional[int] = is_training lowercase__: List[str] = use_attention_mask lowercase__: List[str] = use_token_type_ids lowercase__: Any = use_labels lowercase__: Any = vocab_size lowercase__: List[Any] = hidden_size lowercase__: List[str] = num_hidden_layers lowercase__: List[Any] = num_attention_heads lowercase__: List[Any] = intermediate_size lowercase__: str = hidden_act lowercase__: Optional[Any] = hidden_dropout_prob lowercase__: Optional[Any] = attention_probs_dropout_prob lowercase__: int = max_position_embeddings lowercase__: Dict = type_vocab_size lowercase__: Tuple = type_sequence_label_size lowercase__: Tuple = initializer_range lowercase__: int = num_choices def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: Tuple = None if self.use_attention_mask: lowercase__: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: List[str] = None if self.use_token_type_ids: lowercase__: Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__: List[str] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: List[str] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__: Optional[Any] = config_and_inputs lowercase__: List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __a ( _lowerCAmelCase , unittest.TestCase ): __lowercase : Union[str, Any] = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: str = FlaxAlbertModelTester(self ) @slow def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__: Dict = model_class_name.from_pretrained('albert-base-v2' ) lowercase__: List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowercase ) @require_flax class __a ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: Any = FlaxAlbertModel.from_pretrained('albert-base-v2' ) lowercase__: List[str] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) lowercase__: Any = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase__: str = model(_lowercase , attention_mask=_lowercase )[0] lowercase__: Optional[Any] = (1, 11, 768) self.assertEqual(output.shape , _lowercase ) lowercase__: str = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
196
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowercase : Union[str, Any] = { 'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ 'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel', 'ResNetBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ 'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = [ 'FlaxResNetForImageClassification', 'FlaxResNetModel', 'FlaxResNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys _lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
332
0
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( 'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion' ) __a :Optional[int] = None __a :Optional[Any] = { '7B': 1_1008, '13B': 1_3824, '30B': 1_7920, '65B': 2_2016, '70B': 2_8672, } __a :Tuple = { '7B': 1, '7Bf': 1, '13B': 2, '13Bf': 2, '30B': 4, '65B': 8, '70B': 8, '70Bf': 8, } def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any]=1 ,__UpperCamelCase : str=256 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" with open(snake_case_ ,"r" ) as f: return json.load(snake_case_ ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : str ): """simple docstring""" with open(snake_case_ ,"w" ) as f: json.dump(snake_case_ ,snake_case_ ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int]=True ): """simple docstring""" os.makedirs(snake_case_ ,exist_ok=snake_case_ ) A_ = os.path.join(snake_case_ ,"tmp" ) os.makedirs(snake_case_ ,exist_ok=snake_case_ ) A_ = read_json(os.path.join(snake_case_ ,"params.json" ) ) A_ = NUM_SHARDS[model_size] A_ = params["n_layers"] A_ = params["n_heads"] A_ = n_heads // num_shards A_ = params["dim"] A_ = dim // n_heads A_ = 1_0000.0 A_ = 1.0 / (base ** (torch.arange(0 ,snake_case_ ,2 ).float() / dims_per_head)) if "n_kv_heads" in params: A_ = params["n_kv_heads"] # for GQA / MQA A_ = n_heads_per_shard // num_key_value_heads A_ = dim // num_key_value_heads else: # compatibility with other checkpoints A_ = n_heads A_ = n_heads_per_shard A_ = dim # permute for sliced rotary def permute(__UpperCamelCase : int ,__UpperCamelCase : List[str]=n_heads ,__UpperCamelCase : List[Any]=dim ,__UpperCamelCase : Dict=dim ): return w.view(snake_case_ ,dima // n_heads // 2 ,2 ,snake_case_ ).transpose(1 ,2 ).reshape(snake_case_ ,snake_case_ ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) A_ = torch.load(os.path.join(snake_case_ ,"consolidated.00.pth" ) ,map_location="cpu" ) else: # Sharded A_ = [ torch.load(os.path.join(snake_case_ ,f'''consolidated.{i:02d}.pth''' ) ,map_location="cpu" ) for i in range(snake_case_ ) ] A_ = 0 A_ = {"weight_map": {}} for layer_i in range(snake_case_ ): A_ = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded A_ = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. A_ = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } A_ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(snake_case_ ,snake_case_ ,snake_case_ ) for i in range(snake_case_ ) ] ,dim=0 ,).reshape(snake_case_ ,snake_case_ ) ) A_ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( snake_case_ ,snake_case_ ,snake_case_ ) for i in range(snake_case_ ) ] ,dim=0 ,).reshape(snake_case_ ,snake_case_ ) ,snake_case_ ,snake_case_ ,snake_case_ ,) A_ = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( snake_case_ ,snake_case_ ,snake_case_ ) for i in range(snake_case_ ) ] ,dim=0 ,).reshape(snake_case_ ,snake_case_ ) A_ = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(snake_case_ )] ,dim=1 ) A_ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(snake_case_ )] ,dim=0 ) A_ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(snake_case_ )] ,dim=1 ) A_ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(snake_case_ )] ,dim=0 ) A_ = inv_freq for k, v in state_dict.items(): A_ = filename param_count += v.numel() torch.save(snake_case_ ,os.path.join(snake_case_ ,snake_case_ ) ) A_ = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded A_ = { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], } else: A_ = { "model.norm.weight": loaded[0]["norm.weight"], "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(snake_case_ )] ,dim=1 ), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(snake_case_ )] ,dim=0 ), } for k, v in state_dict.items(): A_ = filename param_count += v.numel() torch.save(snake_case_ ,os.path.join(snake_case_ ,snake_case_ ) ) # Write configs A_ = {"total_size": param_count * 2} write_json(snake_case_ ,os.path.join(snake_case_ ,"pytorch_model.bin.index.json" ) ) A_ = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 A_ = params["multiple_of"] if "multiple_of" in params else 256 A_ = LlamaConfig( hidden_size=snake_case_ ,intermediate_size=compute_intermediate_size(snake_case_ ,snake_case_ ,snake_case_ ) ,num_attention_heads=params["n_heads"] ,num_hidden_layers=params["n_layers"] ,rms_norm_eps=params["norm_eps"] ,num_key_value_heads=snake_case_ ,) config.save_pretrained(snake_case_ ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model." ) A_ = LlamaForCausalLM.from_pretrained(snake_case_ ,torch_dtype=torch.floataa ,low_cpu_mem_usage=snake_case_ ) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format." ) model.save_pretrained(snake_case_ ,safe_serialization=snake_case_ ) shutil.rmtree(snake_case_ ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any] ): """simple docstring""" A_ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) A_ = tokenizer_class(snake_case_ ) tokenizer.save_pretrained(snake_case_ ) def __snake_case ( ): """simple docstring""" A_ = argparse.ArgumentParser() parser.add_argument( "--input_dir" ,help="Location of LLaMA weights, which contains tokenizer.model and model folders" ,) parser.add_argument( "--model_size" ,choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] ,) parser.add_argument( "--output_dir" ,help="Location to write HF model and tokenizer" ,) parser.add_argument("--safe_serialization" ,type=snake_case_ ,help="Whether or not to save using `safetensors`." ) A_ = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,) A_ = os.path.join(args.input_dir ,"tokenizer.model" ) write_tokenizer(args.output_dir ,snake_case_ ) if __name__ == "__main__": main()
312
"""simple docstring""" _lowercase : Any = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowercase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowercase : int = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
332
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __a = logging.get_logger(__name__) class UpperCAmelCase_ ( _lowerCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , *snake_case_ : Any , **snake_case_ : List[Any] ): warnings.warn( """The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use VideoMAEImageProcessor instead.""" , _lowercase , ) super().__init__(*_lowercase , **_lowercase )
35
"""simple docstring""" import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def lowercase__ ( snake_case_ :Optional[int] ): return EnvironmentCommand() def lowercase__ ( snake_case_ :List[str] ): return EnvironmentCommand(args.accelerate_config_file ) class _UpperCAmelCase ( _lowerCAmelCase ): @staticmethod def a ( _lowercase : ArgumentParser ): __UpperCAmelCase = parser.add_parser('''env''' ) download_parser.set_defaults(func=_lowercase ) download_parser.add_argument( '''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=_lowercase ) def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ): __UpperCAmelCase = accelerate_config_file def a ( self : Dict ): __UpperCAmelCase = '''not installed''' if is_safetensors_available(): import safetensors __UpperCAmelCase = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors __UpperCAmelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = __UpperCAmelCase = '''not found''' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __UpperCAmelCase = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(_lowercase ): __UpperCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict() __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(_lowercase , _lowercase ) else F'''\t{accelerate_config}''' ) __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_torch_available(): import torch __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_tf_available(): import tensorflow as tf __UpperCAmelCase = tf.__version__ try: # deprecated in v2.1 __UpperCAmelCase = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __UpperCAmelCase = bool(tf.config.list_physical_devices('''GPU''' ) ) __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_flax_available(): import flax import jax import jaxlib __UpperCAmelCase = flax.__version__ __UpperCAmelCase = jax.__version__ __UpperCAmelCase = jaxlib.__version__ __UpperCAmelCase = jax.lib.xla_bridge.get_backend().platform __UpperCAmelCase = { '''`transformers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Huggingface_hub version''': huggingface_hub.__version__, '''Safetensors version''': F'''{safetensors_version}''', '''Accelerate version''': F'''{accelerate_version}''', '''Accelerate config''': F'''{accelerate_config_str}''', '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''', '''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''', '''Jax version''': F'''{jax_version}''', '''JaxLib version''': F'''{jaxlib_version}''', '''Using GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(_lowercase ) ) return info @staticmethod def a ( _lowercase : str ): return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
332
0
from __future__ import annotations def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : list[str] | None = None , _lowerCamelCase : dict[str, float] | None = None , _lowerCamelCase : bool = False , ) -> int: '''simple docstring''' __UpperCamelCase : Dict = cipher_alphabet or [chr(snake_case_) for i in range(97 , 123)] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) __UpperCamelCase : Optional[Any] = { "a": 0.0_8_4_9_7, "b": 0.0_1_4_9_2, "c": 0.0_2_2_0_2, "d": 0.0_4_2_5_3, "e": 0.1_1_1_6_2, "f": 0.0_2_2_2_8, "g": 0.0_2_0_1_5, "h": 0.0_6_0_9_4, "i": 0.0_7_5_4_6, "j": 0.0_0_1_5_3, "k": 0.0_1_2_9_2, "l": 0.0_4_0_2_5, "m": 0.0_2_4_0_6, "n": 0.0_6_7_4_9, "o": 0.0_7_5_0_7, "p": 0.0_1_9_2_9, "q": 0.0_0_0_9_5, "r": 0.0_7_5_8_7, "s": 0.0_6_3_2_7, "t": 0.0_9_3_5_6, "u": 0.0_2_7_5_8, "v": 0.0_0_9_7_8, "w": 0.0_2_5_6_0, "x": 0.0_0_1_5_0, "y": 0.0_1_9_9_4, "z": 0.0_0_0_7_7, } else: # Custom frequencies dictionary __UpperCamelCase : Any = frequencies_dict if not case_sensitive: __UpperCamelCase : int = ciphertext.lower() # Chi squared statistic values __UpperCamelCase : Any = {} # cycle through all of the shifts for shift in range(len(snake_case_)): __UpperCamelCase : int = "" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet __UpperCamelCase : Optional[int] = (alphabet_letters.index(letter.lower()) - shift) % len( snake_case_) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter __UpperCamelCase : List[Any] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: __UpperCamelCase : List[Any] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message __UpperCamelCase : str = decrypted_with_shift.lower().count(snake_case_) # Get the excepcted amount of times the letter should appear based # on letter frequencies __UpperCamelCase : Any = frequencies[letter] * occurrences # Complete the chi squared statistic formula __UpperCamelCase : Dict = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message __UpperCamelCase : Tuple = decrypted_with_shift.count(snake_case_) # Get the excepcted amount of times the letter should appear based # on letter frequencies __UpperCamelCase : List[str] = frequencies[letter] * occurrences # Complete the chi squared statistic formula __UpperCamelCase : List[str] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary __UpperCamelCase : Tuple = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_lowerCamelCase : int) -> tuple[float, str]: return chi_squared_statistic_values[key] __UpperCamelCase : Union[str, Any] = min( snake_case_ , key=snake_case_ , ) # Get all the data from the most likely cipher (key, decoded message) ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : Tuple = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
232
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list[float] , snake_case_ :list[float] ): __UpperCAmelCase = sorted(numsa + numsa ) __UpperCAmelCase , __UpperCAmelCase = divmod(len(snake_case_ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowercase : int = [float(x) for x in input('Enter the elements of first array: ').split()] _lowercase : Tuple = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
332
0
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[Any] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() __lowerCAmelCase : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) __lowerCAmelCase : List[Any] = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } __lowerCAmelCase : Optional[int] = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_6000, '''return_attention_mask''': False, '''do_normalize''': True, } __lowerCAmelCase : List[str] = tempfile.mkdtemp() __lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , _lowercase ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowercase ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowercase ) + '''\n''' ) # load decoder from hub __lowerCAmelCase : Any = '''hf-internal-testing/ngram-beam-search-decoder''' def UpperCamelCase__ ( self , **A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.add_kwargs_tokens_map.copy() kwargs.update(_lowercase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def UpperCamelCase__ ( self , **A_ ) ->Optional[int]: '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowercase ) def UpperCamelCase__ ( self , **A_ ) ->Optional[Any]: '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowercase ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : int = self.get_tokenizer() __lowerCAmelCase : Tuple = self.get_feature_extractor() __lowerCAmelCase : Union[str, Any] = self.get_decoder() __lowerCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowercase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _lowercase ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _lowercase ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __lowerCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Dict = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_lowercase , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_lowercase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : str = self.get_feature_extractor() __lowerCAmelCase : Optional[int] = self.get_tokenizer() __lowerCAmelCase : Optional[Any] = self.get_decoder() __lowerCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __lowerCAmelCase : List[str] = floats_list((3, 1000) ) __lowerCAmelCase : Any = feature_extractor(_lowercase , return_tensors='''np''' ) __lowerCAmelCase : str = processor(_lowercase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : int = self.get_feature_extractor() __lowerCAmelCase : Optional[Any] = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_decoder() __lowerCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __lowerCAmelCase : Optional[Any] = '''This is a test string''' __lowerCAmelCase : Optional[int] = processor(text=_lowercase ) __lowerCAmelCase : Optional[Any] = tokenizer(_lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase__ ( self , A_=(2, 10, 16) , A_=77 ) ->int: '''simple docstring''' np.random.seed(_lowercase ) return np.random.rand(*_lowercase ) def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = self.get_feature_extractor() __lowerCAmelCase : str = self.get_tokenizer() __lowerCAmelCase : int = self.get_decoder() __lowerCAmelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __lowerCAmelCase : List[Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 ) __lowerCAmelCase : Optional[int] = processor.decode(_lowercase ) __lowerCAmelCase : Dict = decoder.decode_beams(_lowercase )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def UpperCamelCase__ ( self , A_ ) ->Dict: '''simple docstring''' __lowerCAmelCase : int = self.get_feature_extractor() __lowerCAmelCase : Union[str, Any] = self.get_tokenizer() __lowerCAmelCase : List[Any] = self.get_decoder() __lowerCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __lowerCAmelCase : Any = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: __lowerCAmelCase : Optional[Any] = processor.batch_decode(_lowercase ) else: with get_context(_lowercase ).Pool() as pool: __lowerCAmelCase : int = processor.batch_decode(_lowercase , _lowercase ) __lowerCAmelCase : int = list(_lowercase ) with get_context('''fork''' ).Pool() as p: __lowerCAmelCase : Tuple = decoder.decode_beams_batch(_lowercase , _lowercase ) __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : str = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_lowercase , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_lowercase , decoded_processor.logit_score ) self.assertListEqual(_lowercase , decoded_processor.lm_score ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.get_feature_extractor() __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : int = self.get_decoder() __lowerCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __lowerCAmelCase : int = self._get_dummy_logits() __lowerCAmelCase : Tuple = 15 __lowerCAmelCase : int = -20.0 __lowerCAmelCase : Tuple = -4.0 __lowerCAmelCase : List[str] = processor.batch_decode( _lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , ) __lowerCAmelCase : Dict = decoded_processor_out.text __lowerCAmelCase : Optional[int] = list(_lowercase ) with get_context('''fork''' ).Pool() as pool: __lowerCAmelCase : Optional[int] = decoder.decode_beams_batch( _lowercase , _lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , ) __lowerCAmelCase : Optional[Any] = [d[0][0] for d in decoded_decoder_out] __lowerCAmelCase : Any = [d[0][2] for d in decoded_decoder_out] __lowerCAmelCase : int = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_lowercase , _lowercase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _lowercase ) self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , _lowercase , atol=1e-3 ) ) self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , _lowercase , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : List[Any] = self.get_feature_extractor() __lowerCAmelCase : List[Any] = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_decoder() __lowerCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __lowerCAmelCase : Dict = self._get_dummy_logits() __lowerCAmelCase : List[str] = 2.0 __lowerCAmelCase : Dict = 5.0 __lowerCAmelCase : int = -20.0 __lowerCAmelCase : Dict = True __lowerCAmelCase : List[Any] = processor.batch_decode( _lowercase , alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , ) __lowerCAmelCase : Optional[int] = decoded_processor_out.text __lowerCAmelCase : Optional[Any] = list(_lowercase ) decoder.reset_params( alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , ) with get_context('''fork''' ).Pool() as pool: __lowerCAmelCase : str = decoder.decode_beams_batch( _lowercase , _lowercase , ) __lowerCAmelCase : int = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_lowercase , _lowercase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _lowercase ) __lowerCAmelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , _lowercase ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : Dict = processor.decoder.model_container[processor.decoder._model_key] __lowerCAmelCase : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __lowerCAmelCase : Any = os.listdir(_lowercase ) __lowerCAmelCase : Optional[int] = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_lowercase , _lowercase ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : int = snapshot_download('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(_lowercase ) __lowerCAmelCase : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key] __lowerCAmelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __lowerCAmelCase : List[str] = os.listdir(_lowercase ) __lowerCAmelCase : Tuple = os.listdir(_lowercase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_lowercase , _lowercase ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : int = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : List[Any] = floats_list((3, 1000) ) __lowerCAmelCase : Union[str, Any] = processor_wavaveca(_lowercase , return_tensors='''np''' ) __lowerCAmelCase : int = processor_auto(_lowercase , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) __lowerCAmelCase : str = self._get_dummy_logits() __lowerCAmelCase : Optional[Any] = processor_wavaveca.batch_decode(_lowercase ) __lowerCAmelCase : Tuple = processor_auto.batch_decode(_lowercase ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : List[str] = self.get_feature_extractor() __lowerCAmelCase : Optional[Any] = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_decoder() __lowerCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def UpperCamelCase__ ( A_ , A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = [d[key] for d in offsets] return retrieved_list def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : List[Any] = self._get_dummy_logits()[0] __lowerCAmelCase : str = processor.decode(_lowercase , output_word_offsets=_lowercase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_lowercase , _lowercase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : Dict = self._get_dummy_logits() __lowerCAmelCase : Optional[Any] = processor.batch_decode(_lowercase , output_word_offsets=_lowercase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_lowercase , _lowercase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' import torch __lowerCAmelCase : Optional[Any] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_lowercase ) __lowerCAmelCase : Optional[int] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) ) __lowerCAmelCase : int = iter(_lowercase ) __lowerCAmelCase : int = next(_lowercase ) __lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) __lowerCAmelCase : Union[str, Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __lowerCAmelCase : List[Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): __lowerCAmelCase : Optional[int] = model(_lowercase ).logits.cpu().numpy() __lowerCAmelCase : Optional[Any] = processor.decode(logits[0] , output_word_offsets=_lowercase ) __lowerCAmelCase : List[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __lowerCAmelCase : List[str] = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] __lowerCAmelCase : List[Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , _lowercase ) self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , output.text ) # output times __lowerCAmelCase : Optional[int] = torch.tensor(self.get_from_offsets(_lowercase , '''start_time''' ) ) __lowerCAmelCase : Optional[int] = torch.tensor(self.get_from_offsets(_lowercase , '''end_time''' ) ) # fmt: off __lowerCAmelCase : Any = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) __lowerCAmelCase : int = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) ) self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) )
275
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class _UpperCAmelCase : def __init__( self : Union[str, Any] , _lowercase : Optional[Any] ): __UpperCAmelCase = str(id_ ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = [] __UpperCAmelCase = {} # {vertex:distance} def __lt__( self : str , _lowercase : List[Any] ): return self.key < other.key def __repr__( self : int ): return self.id def a ( self : Union[str, Any] , _lowercase : int ): self.neighbors.append(_lowercase ) def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : int ): __UpperCAmelCase = weight def lowercase__ ( snake_case_ :int , snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , snake_case_ ) graph[b - 1].add_edge(graph[a - 1] , snake_case_ ) def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ): __UpperCAmelCase = [] for u in graph: __UpperCAmelCase = math.inf __UpperCAmelCase = None __UpperCAmelCase = 0 __UpperCAmelCase = graph[:] while q: __UpperCAmelCase = min(snake_case_ ) q.remove(snake_case_ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): __UpperCAmelCase = u __UpperCAmelCase = u.edges[v.id] for i in range(1 , len(snake_case_ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ): for u in graph: __UpperCAmelCase = math.inf __UpperCAmelCase = None __UpperCAmelCase = 0 __UpperCAmelCase = list(snake_case_ ) hq.heapify(snake_case_ ) while h: __UpperCAmelCase = hq.heappop(snake_case_ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): __UpperCAmelCase = u __UpperCAmelCase = u.edges[v.id] hq.heapify(snake_case_ ) for i in range(1 , len(snake_case_ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def lowercase__ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
332
0
'''simple docstring''' from __future__ import annotations from random import random from typing import Generic, TypeVar a_ = TypeVar('KT') a_ = TypeVar('VT') class __SCREAMING_SNAKE_CASE ( Generic[KT, VT] ): def __init__( self : Optional[Any] , __lowercase : KT | str = "root" , __lowercase : VT | None = None ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =key SCREAMING_SNAKE_CASE__ : Union[str, Any] =value SCREAMING_SNAKE_CASE__ : Optional[Any] =[] def __repr__( self : Union[str, Any] ) -> Any: return F"Node({self.key}: {self.value})" @property def __magic_name__ ( self : Optional[int] ) -> Dict: return len(self.forward ) class __SCREAMING_SNAKE_CASE ( Generic[KT, VT] ): def __init__( self : Union[str, Any] , __lowercase : float = 0.5 , __lowercase : int = 16 ) -> List[Any]: SCREAMING_SNAKE_CASE__ : List[str] =Node[KT, VT]() SCREAMING_SNAKE_CASE__ : List[Any] =0 SCREAMING_SNAKE_CASE__ : List[str] =p SCREAMING_SNAKE_CASE__ : Dict =max_level def __str__( self : str ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =list(self ) if len(_lowercase ) == 0: return F"SkipList(level={self.level})" SCREAMING_SNAKE_CASE__ : List[str] =max((len(str(_lowercase ) ) for item in items) , default=4 ) SCREAMING_SNAKE_CASE__ : Any =max(_lowercase , 4 ) + 4 SCREAMING_SNAKE_CASE__ : List[Any] =self.head SCREAMING_SNAKE_CASE__ : Dict =[] SCREAMING_SNAKE_CASE__ : Dict =node.forward.copy() lines.append(F"[{node.key}]".ljust(_lowercase , '''-''' ) + '''* ''' * len(_lowercase ) ) lines.append(''' ''' * label_size + '''| ''' * len(_lowercase ) ) while len(node.forward ) != 0: SCREAMING_SNAKE_CASE__ : Any =node.forward[0] lines.append( F"[{node.key}]".ljust(_lowercase , '''-''' ) + ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) ) lines.append(''' ''' * label_size + '''| ''' * len(_lowercase ) ) SCREAMING_SNAKE_CASE__ : Dict =node.forward lines.append('''None'''.ljust(_lowercase ) + '''* ''' * len(_lowercase ) ) return F"SkipList(level={self.level})\n" + "\n".join(_lowercase ) def __iter__( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.head while len(node.forward ) != 0: yield node.forward[0].key SCREAMING_SNAKE_CASE__ : Tuple =node.forward[0] def __magic_name__ ( self : Union[str, Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Any =1 while random() < self.p and level < self.max_level: level += 1 return level def __magic_name__ ( self : List[str] , __lowercase : int ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Dict =[] SCREAMING_SNAKE_CASE__ : List[str] =self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: SCREAMING_SNAKE_CASE__ : Tuple =node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(_lowercase ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def __magic_name__ ( self : str , __lowercase : KT ) -> Optional[int]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self._locate_node(_lowercase ) if node is not None: for i, update_node in enumerate(_lowercase ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: SCREAMING_SNAKE_CASE__ : int =node.forward[i] else: SCREAMING_SNAKE_CASE__ : List[str] =update_node.forward[:i] def __magic_name__ ( self : Any , __lowercase : KT , __lowercase : VT ) -> str: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =self._locate_node(_lowercase ) if node is not None: SCREAMING_SNAKE_CASE__ : int =value else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , _lowercase ): update_vector.append(self.head ) SCREAMING_SNAKE_CASE__ : int =level SCREAMING_SNAKE_CASE__ : str =Node(_lowercase , _lowercase ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(_lowercase ) else: SCREAMING_SNAKE_CASE__ : Optional[int] =new_node def __magic_name__ ( self : Optional[Any] , __lowercase : VT ) -> Dict: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self._locate_node(_lowercase ) if node is not None: return node.value return None def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =SkipList() skip_list.insert('''Key1''', 3 ) skip_list.insert('''Key2''', 1_2 ) skip_list.insert('''Key3''', 4_1 ) skip_list.insert('''Key4''', -1_9 ) SCREAMING_SNAKE_CASE__ : Tuple =skip_list.head SCREAMING_SNAKE_CASE__ : Union[str, Any] ={} while node.level != 0: SCREAMING_SNAKE_CASE__ : int =node.forward[0] SCREAMING_SNAKE_CASE__ : Dict =node.value assert len(snake_case_ ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 1_2 assert all_values["Key3"] == 4_1 assert all_values["Key4"] == -1_9 def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =SkipList() skip_list.insert('''Key1''', 1_0 ) skip_list.insert('''Key1''', 1_2 ) skip_list.insert('''Key5''', 7 ) skip_list.insert('''Key7''', 1_0 ) skip_list.insert('''Key10''', 5 ) skip_list.insert('''Key7''', 7 ) skip_list.insert('''Key5''', 5 ) skip_list.insert('''Key10''', 1_0 ) SCREAMING_SNAKE_CASE__ : Dict =skip_list.head SCREAMING_SNAKE_CASE__ : Dict ={} while node.level != 0: SCREAMING_SNAKE_CASE__ : int =node.forward[0] SCREAMING_SNAKE_CASE__ : Optional[int] =node.value if len(snake_case_ ) != 4: print() assert len(snake_case_ ) == 4 assert all_values["Key1"] == 1_2 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 1_0 def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] =SkipList() assert skip_list.find('''Some key''' ) is None def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =SkipList() skip_list.insert('''Key2''', 2_0 ) assert skip_list.find('''Key2''' ) == 2_0 skip_list.insert('''Some Key''', 1_0 ) skip_list.insert('''Key2''', 8 ) skip_list.insert('''V''', 1_3 ) assert skip_list.find('''Y''' ) is None assert skip_list.find('''Key2''' ) == 8 assert skip_list.find('''Some Key''' ) == 1_0 assert skip_list.find('''V''' ) == 1_3 def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =SkipList() skip_list.delete('''Some key''' ) assert len(skip_list.head.forward ) == 0 def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =SkipList() skip_list.insert('''Key1''', 1_2 ) skip_list.insert('''V''', 1_3 ) skip_list.insert('''X''', 1_4 ) skip_list.insert('''Key2''', 1_5 ) skip_list.delete('''V''' ) skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''Key2''' ) is None def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =SkipList() skip_list.insert('''Key1''', 1_2 ) skip_list.insert('''V''', 1_3 ) skip_list.insert('''X''', 1_4 ) skip_list.insert('''Key2''', 1_5 ) skip_list.delete('''V''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) == 1_4 assert skip_list.find('''Key1''' ) == 1_2 assert skip_list.find('''Key2''' ) == 1_5 skip_list.delete('''X''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) == 1_2 assert skip_list.find('''Key2''' ) == 1_5 skip_list.delete('''Key1''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) == 1_5 skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) is None def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple =SkipList() skip_list.insert('''Key1''', 1_2 ) skip_list.insert('''V''', 1_3 ) skip_list.insert('''X''', 1_4_2 ) skip_list.insert('''Key2''', 1_5 ) skip_list.delete('''X''' ) def traverse_keys(UpperCamelCase__ : int ): yield node.key for forward_node in node.forward: yield from traverse_keys(snake_case_ ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def _a( ): '''simple docstring''' def is_sorted(UpperCamelCase__ : Dict ): return all(next_item >= item for item, next_item in zip(snake_case_, lst[1:] ) ) SCREAMING_SNAKE_CASE__ : List[Any] =SkipList() for i in range(1_0 ): skip_list.insert(snake_case_, snake_case_ ) assert is_sorted(list(snake_case_ ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(snake_case_ ) ) skip_list.insert(-1_2, -1_2 ) skip_list.insert(7_7, 7_7 ) assert is_sorted(list(snake_case_ ) ) def _a( ): '''simple docstring''' for _ in range(1_0_0 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =SkipList() skip_list.insert(2, '''2''' ) skip_list.insert(4, '''4''' ) skip_list.insert(6, '''4''' ) skip_list.insert(4, '''5''' ) skip_list.insert(8, '''4''' ) skip_list.insert(9, '''4''' ) skip_list.delete(4 ) print(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
152
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : str = logging.get_logger(__name__) _lowercase : Dict = { 'microsoft/swinv2-tiny-patch4-window8-256': ( 'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "swinv2" a__ : List[Any] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Any , _lowercase : List[Any]=2_24 , _lowercase : int=4 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=96 , _lowercase : Optional[int]=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : str=7 , _lowercase : Union[str, Any]=4.0 , _lowercase : List[str]=True , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=False , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-5 , _lowercase : Tuple=32 , **_lowercase : Optional[int] , ): super().__init__(**_lowercase ) __UpperCAmelCase = image_size __UpperCAmelCase = patch_size __UpperCAmelCase = num_channels __UpperCAmelCase = embed_dim __UpperCAmelCase = depths __UpperCAmelCase = len(_lowercase ) __UpperCAmelCase = num_heads __UpperCAmelCase = window_size __UpperCAmelCase = mlp_ratio __UpperCAmelCase = qkv_bias __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = drop_path_rate __UpperCAmelCase = hidden_act __UpperCAmelCase = use_absolute_embeddings __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = initializer_range __UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) ) __UpperCAmelCase = (0, 0, 0, 0)
332
0
"""simple docstring""" def _A (__a , __a ) -> Dict: """simple docstring""" if b == 0: return 1 if (b % 2) == 0: return actual_power(snake_case_ , int(b / 2 ) ) * actual_power(snake_case_ , int(b / 2 ) ) else: return a * actual_power(snake_case_ , int(b / 2 ) ) * actual_power(snake_case_ , int(b / 2 ) ) def _A (__a , __a ) -> List[Any]: """simple docstring""" if b < 0: return 1 / actual_power(snake_case_ , snake_case_ ) return actual_power(snake_case_ , snake_case_ ) if __name__ == "__main__": print(power(-2, -3))
91
"""simple docstring""" import pprint import requests _lowercase : Optional[Any] = 'https://zenquotes.io/api' def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": _lowercase : int = random_quotes() pprint.pprint(response)
332
0
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : int = SwinConfig.from_pretrained( 'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) __lowerCAmelCase : Dict = MaskFormerConfig(backbone_config=snake_case_ ) __lowerCAmelCase : int = 'huggingface/label-files' if "ade20k-full" in model_name: # this should be ok __lowerCAmelCase : List[Any] = 847 __lowerCAmelCase : List[str] = 'maskformer-ade20k-full-id2label.json' elif "ade" in model_name: # this should be ok __lowerCAmelCase : Union[str, Any] = 150 __lowerCAmelCase : Any = 'ade20k-id2label.json' elif "coco-stuff" in model_name: # this should be ok __lowerCAmelCase : List[str] = 171 __lowerCAmelCase : Union[str, Any] = 'maskformer-coco-stuff-id2label.json' elif "coco" in model_name: # TODO __lowerCAmelCase : Any = 133 __lowerCAmelCase : List[str] = 'coco-panoptic-id2label.json' elif "cityscapes" in model_name: # this should be ok __lowerCAmelCase : Tuple = 19 __lowerCAmelCase : Optional[int] = 'cityscapes-id2label.json' elif "vistas" in model_name: # this should be ok __lowerCAmelCase : str = 65 __lowerCAmelCase : Union[str, Any] = 'mapillary-vistas-id2label.json' __lowerCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='dataset' ) , 'r' ) ) __lowerCAmelCase : int = {int(snake_case_ ): v for k, v in idalabel.items()} return config def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Optional[Any] = [] # stem # fmt: off rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") ) rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") ) # FPN rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") ) rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") ) rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") ) rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") ) rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") ) rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") ) rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') ) rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") ) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") ) # cross-attention out projection rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") ) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") ) # MLP 1 rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") ) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") ) # MLP 2 rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") ) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") ) # layernorm 1 (self-attention layernorm) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") ) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") ) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") ) # layernorm 3 (final layernorm) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") ) rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') ) # heads on top rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') ) rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') ) rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') ) for i in range(3 ): rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") ) rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") ) # fmt: on return rename_keys def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Dict = dct.pop(snake_case_ ) __lowerCAmelCase : Union[str, Any] = val def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowerCAmelCase : int = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowerCAmelCase : Tuple = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" ) __lowerCAmelCase : List[str] = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase : int = in_proj_weight[:dim, :] __lowerCAmelCase : List[Any] = in_proj_bias[: dim] __lowerCAmelCase : Optional[Any] = in_proj_weight[ dim : dim * 2, : ] __lowerCAmelCase : Optional[int] = in_proj_bias[ dim : dim * 2 ] __lowerCAmelCase : Optional[int] = in_proj_weight[ -dim :, : ] __lowerCAmelCase : Union[str, Any] = in_proj_bias[-dim :] # fmt: on def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): # fmt: off __lowerCAmelCase : int = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowerCAmelCase : List[Any] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" ) __lowerCAmelCase : Optional[Any] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase : Union[str, Any] = in_proj_weight[: hidden_size, :] __lowerCAmelCase : Optional[int] = in_proj_bias[:config.hidden_size] __lowerCAmelCase : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :] __lowerCAmelCase : int = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase : List[Any] = in_proj_weight[-hidden_size :, :] __lowerCAmelCase : Optional[Any] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowerCAmelCase : Optional[int] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" ) __lowerCAmelCase : str = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase : Optional[int] = in_proj_weight[: hidden_size, :] __lowerCAmelCase : Any = in_proj_bias[:config.hidden_size] __lowerCAmelCase : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] __lowerCAmelCase : Union[str, Any] = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase : int = in_proj_weight[-hidden_size :, :] __lowerCAmelCase : List[Any] = in_proj_bias[-hidden_size :] # fmt: on def __lowerCAmelCase (): __lowerCAmelCase : str = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCAmelCase : Dict = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False ): __lowerCAmelCase : Any = get_maskformer_config(snake_case_ ) # load original state_dict with open(snake_case_ , 'rb' ) as f: __lowerCAmelCase : int = pickle.load(snake_case_ ) __lowerCAmelCase : Dict = data['model'] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __lowerCAmelCase : Any = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) read_in_swin_q_k_v(snake_case_ , config.backbone_config ) read_in_decoder_q_k_v(snake_case_ , snake_case_ ) # update to torch tensors for key, value in state_dict.items(): __lowerCAmelCase : List[Any] = torch.from_numpy(snake_case_ ) # load 🤗 model __lowerCAmelCase : Optional[Any] = MaskFormerForInstanceSegmentation(snake_case_ ) model.eval() for name, param in model.named_parameters(): print(snake_case_ , param.shape ) __lowerCAmelCase , __lowerCAmelCase : List[str] = model.load_state_dict(snake_case_ , strict=snake_case_ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(snake_case_ ) == 0, F"Unexpected keys: {unexpected_keys}" # verify results __lowerCAmelCase : List[Any] = prepare_img() if "vistas" in model_name: __lowerCAmelCase : str = 65 elif "cityscapes" in model_name: __lowerCAmelCase : Any = 6_5535 else: __lowerCAmelCase : List[str] = 255 __lowerCAmelCase : Union[str, Any] = True if 'ade' in model_name else False __lowerCAmelCase : Tuple = MaskFormerImageProcessor(ignore_index=snake_case_ , reduce_labels=snake_case_ ) __lowerCAmelCase : Dict = image_processor(snake_case_ , return_tensors='pt' ) __lowerCAmelCase : Dict = model(**snake_case_ ) print('Logits:' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __lowerCAmelCase : str = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"Saving model and image processor to {pytorch_dump_folder_path}" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) if push_to_hub: print('Pushing model and image processor to the hub...' ) model.push_to_hub(F"nielsr/{model_name}" ) image_processor.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you\'d like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase__ = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
86
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[str] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :Union[tf.Tensor, np.ndarray] ): if isinstance(snake_case_ , np.ndarray ): return list(tensor.shape ) __UpperCAmelCase = tf.shape(snake_case_ ) if tensor.shape == tf.TensorShape(snake_case_ ): return dynamic __UpperCAmelCase = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(snake_case_ )] def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :Optional[int] = None , snake_case_ :Optional[str] = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=snake_case_ , name=snake_case_ ) def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :Union[str, Any]=1E-5 , snake_case_ :List[str]=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case_ , snake_case_ ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized __UpperCAmelCase , __UpperCAmelCase = tf.nn.moments(snake_case_ , axes=[axis] , keepdims=snake_case_ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __UpperCAmelCase = [1] * inputs.shape.rank __UpperCAmelCase = shape_list(snake_case_ )[axis] __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) # Compute layer normalization using the batch_normalization # function. __UpperCAmelCase = tf.nn.batch_normalization( snake_case_ , snake_case_ , snake_case_ , offset=snake_case_ , scale=snake_case_ , variance_epsilon=snake_case_ , ) return outputs def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :List[str]=0 , snake_case_ :Optional[Any]=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __UpperCAmelCase = tf.shape(snake_case_ ) __UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :tf.Tensor ): if not isinstance(snake_case_ , tf.Tensor ): __UpperCAmelCase = tf.convert_to_tensor(snake_case_ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __UpperCAmelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __UpperCAmelCase = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __UpperCAmelCase = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :int , snake_case_ :str = "input_ids" ): tf.debugging.assert_less( snake_case_ , tf.cast(snake_case_ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case_ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any] , snake_case_ :List[str] ): __UpperCAmelCase = 64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __UpperCAmelCase = [x for x in data if len(snake_case_ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) __UpperCAmelCase = np.asarray(snake_case_ ) __UpperCAmelCase = 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(snake_case_ ): __UpperCAmelCase = chunk_data else: __UpperCAmelCase = data def lowercase__ ( snake_case_ :str , snake_case_ :List[str] ): if name in group.attrs: __UpperCAmelCase = [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs[name]] else: __UpperCAmelCase = [] __UpperCAmelCase = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase__ ( snake_case_ :Tuple ): def _expand_single_ad_tensor(snake_case_ :Optional[int] ): if isinstance(snake_case_ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(snake_case_ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , snake_case_ )
332
0
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class A ( _lowerCAmelCase , unittest.TestCase ): # TODO: is there an appropriate internal test set? __UpperCAmelCase : int = "ssube/stable-diffusion-x4-upscaler-onnx" def lowercase_ (self : Any , __UpperCAmelCase : Optional[int]=0 ) -> Any: """simple docstring""" UpperCAmelCase__ = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(_lowercase ) ) UpperCAmelCase__ = torch.manual_seed(_lowercase ) UpperCAmelCase__ = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowercase_ (self : Union[str, Any] ) -> str: """simple docstring""" UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_lowercase ) UpperCAmelCase__ = self.get_dummy_inputs() UpperCAmelCase__ = pipe(**_lowercase ).images UpperCAmelCase__ = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def lowercase_ (self : str ) -> int: """simple docstring""" UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) UpperCAmelCase__ = self.get_dummy_inputs() UpperCAmelCase__ = pipe(**_lowercase ).images UpperCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def lowercase_ (self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowercase ) UpperCAmelCase__ = self.get_dummy_inputs() UpperCAmelCase__ = pipe(**_lowercase ).images UpperCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def lowercase_ (self : Any ) -> str: """simple docstring""" UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowercase ) UpperCAmelCase__ = self.get_dummy_inputs() UpperCAmelCase__ = pipe(**_lowercase ).images UpperCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def lowercase_ (self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowercase ) UpperCAmelCase__ = self.get_dummy_inputs() UpperCAmelCase__ = pipe(**_lowercase ).images UpperCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class A ( unittest.TestCase ): @property def lowercase_ (self : List[Any] ) -> str: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase_ (self : Tuple ) -> Any: """simple docstring""" UpperCAmelCase__ = ort.SessionOptions() UpperCAmelCase__ = False return options def lowercase_ (self : Any ) -> Dict: """simple docstring""" UpperCAmelCase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) UpperCAmelCase__ = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) UpperCAmelCase__ = "A fantasy landscape, trending on artstation" UpperCAmelCase__ = torch.manual_seed(0 ) UpperCAmelCase__ = pipe( prompt=_lowercase , image=_lowercase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_lowercase , output_type="np" , ) UpperCAmelCase__ = output.images UpperCAmelCase__ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def lowercase_ (self : List[str] ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) UpperCAmelCase__ = init_image.resize((1_2_8, 1_2_8) ) UpperCAmelCase__ = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) UpperCAmelCase__ = "A fantasy landscape, trending on artstation" UpperCAmelCase__ = torch.manual_seed(0 ) UpperCAmelCase__ = pipe( prompt=_lowercase , image=_lowercase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=_lowercase , output_type="np" , ) UpperCAmelCase__ = output.images UpperCAmelCase__ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
65
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowercase__ ( snake_case_ :Union[str, Any]=None ): if subparsers is not None: __UpperCAmelCase = subparsers.add_parser('''env''' ) else: __UpperCAmelCase = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=snake_case_ , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=snake_case_ ) return parser def lowercase__ ( snake_case_ :List[Any] ): __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = is_xpu_available() __UpperCAmelCase = is_npu_available() __UpperCAmelCase = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(snake_case_ ): __UpperCAmelCase = load_config_from_file(args.config_file ).to_dict() __UpperCAmelCase = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''PyTorch XPU available''': str(snake_case_ ), '''PyTorch NPU available''': str(snake_case_ ), '''System RAM''': F'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''', } if pt_cuda_available: __UpperCAmelCase = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(snake_case_ , snake_case_ ) else F'''\t{accelerate_config}''' ) print(snake_case_ ) __UpperCAmelCase = accelerate_config return info def lowercase__ ( ): __UpperCAmelCase = env_command_parser() __UpperCAmelCase = parser.parse_args() env_command(snake_case_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
332
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase , unittest.TestCase ): _a = KandinskyVaaImgaImgPipeline _a = ["image_embeds", "negative_image_embeds", "image"] _a = [ "image_embeds", "negative_image_embeds", "image", ] _a = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] _a = False @property def __lowercase ( self : List[str] ): return 32 @property def __lowercase ( self : Any ): return 32 @property def __lowercase ( self : Union[str, Any] ): return self.time_input_dim @property def __lowercase ( self : Union[str, Any] ): return self.time_input_dim * 4 @property def __lowercase ( self : Union[str, Any] ): return 100 @property def __lowercase ( self : Optional[int] ): torch.manual_seed(0 ) lowerCAmelCase = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } lowerCAmelCase = UNetaDConditionModel(**_lowercase ) return model @property def __lowercase ( self : Optional[Any] ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __lowercase ( self : List[Any] ): torch.manual_seed(0 ) lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def __lowercase ( self : Dict ): lowerCAmelCase = self.dummy_unet lowerCAmelCase = self.dummy_movq lowerCAmelCase = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_0085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } lowerCAmelCase = DDIMScheduler(**_lowercase ) lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __lowercase ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=0 ): lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase ) lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowercase ) # create init_image lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((256, 256) ) if str(_lowercase ).startswith("""mps""" ): lowerCAmelCase = torch.manual_seed(_lowercase ) else: lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) lowerCAmelCase = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def __lowercase ( self : List[Any] ): lowerCAmelCase = """cpu""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_lowercase ) lowerCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) lowerCAmelCase = pipe(**self.get_dummy_inputs(_lowercase ) ) lowerCAmelCase = output.images lowerCAmelCase = pipe( **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array( [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowercase ( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : Optional[Any] ): lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_img2img_frog.npy""" ) lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) lowerCAmelCase = """A red cartoon frog, 4k""" lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(_lowercase ) lowerCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) lowerCAmelCase = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase , lowerCAmelCase = pipe_prior( _lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() lowerCAmelCase = pipeline( image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) lowerCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowercase , _lowercase )
155
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 25_00_04 _lowercase : int = 25_00_20 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : Union[str, Any] = MBartaaTokenizer a__ : List[str] = MBartaaTokenizerFast a__ : Any = True a__ : List[str] = True def a ( self : str ): super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self : Dict ): __UpperCAmelCase = '''<s>''' __UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowercase ) , 10_54 ) def a ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def a ( self : str ): __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) __UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) __UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def a ( self : str ): # fmt: off __UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def a ( self : str ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=True __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=False __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): a__ : str = "facebook/mbart-large-50-one-to-many-mmt" a__ : Union[str, Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] a__ : Any = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] a__ : Any = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def a ( cls : Tuple ): __UpperCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) __UpperCAmelCase = 1 return cls def a ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) def a ( self : Optional[Any] ): self.assertIn(_lowercase , self.tokenizer.all_special_ids ) __UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) __UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase ) self.assertEqual(_lowercase , _lowercase ) self.assertNotIn(self.tokenizer.eos_token , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , _lowercase ) __UpperCAmelCase = 10 __UpperCAmelCase = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0] self.assertEqual(ids[0] , _lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_lowercase ) , _lowercase ) def a ( self : Optional[int] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowercase ) __UpperCAmelCase = MBartaaTokenizer.from_pretrained(_lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors='''pt''' ) __UpperCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors='''pt''' ) __UpperCAmelCase = targets['''input_ids'''] __UpperCAmelCase = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(_lowercase ) , { # en_XX, A, test, EOS '''input_ids''': [[25_00_04, 62, 30_34, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
332
0
"""simple docstring""" _UpperCamelCase : Any = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}] _UpperCamelCase : Any = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
220
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase__ ( ): raise RuntimeError('''CUDA out of memory.''' ) class _UpperCAmelCase ( nn.Module ): def __init__( self : Optional[Any] ): super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def a ( self : Optional[int] , _lowercase : Optional[Any] ): return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) ) class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[int] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) def a ( self : Optional[int] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : str , _lowercase : List[str] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def a ( self : Tuple ): @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(_lowercase : Optional[int] ): pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : List[Any] ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : List[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : Union[str, Any] ): @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : str ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_lowercase ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def a ( self : Dict ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : int ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def a ( self : str ): __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _lowercase ) __UpperCAmelCase = release_memory(_lowercase ) self.assertEqual(torch.cuda.memory_allocated() , _lowercase )
332
0
from torch import nn def snake_case_ ( snake_case ) -> Tuple: if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(f'Unsupported activation function: {act_fn}' )
196
"""simple docstring""" import argparse import copy def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = {} with open(snake_case_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[1], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[0], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ): with open(snake_case_ ) as f: __UpperCAmelCase = f.read(1 ) __UpperCAmelCase = start_node __UpperCAmelCase = [] __UpperCAmelCase = start_node __UpperCAmelCase = 0 while visiting not in first_solution: __UpperCAmelCase = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution: __UpperCAmelCase = k[1] __UpperCAmelCase = k[0] first_solution.append(snake_case_ ) __UpperCAmelCase = distance_of_first_solution + int(snake_case_ ) __UpperCAmelCase = best_node first_solution.append(snake_case_ ) __UpperCAmelCase = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __UpperCAmelCase = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ): __UpperCAmelCase = [] for n in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) for kn in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) if n == kn: continue __UpperCAmelCase = copy.deepcopy(snake_case_ ) __UpperCAmelCase = kn __UpperCAmelCase = n __UpperCAmelCase = 0 for k in _tmp[:-1]: __UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __UpperCAmelCase = distance + int(i[1] ) _tmp.append(snake_case_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ): __UpperCAmelCase = 1 __UpperCAmelCase = first_solution __UpperCAmelCase = [] __UpperCAmelCase = distance_of_first_solution __UpperCAmelCase = solution while count <= iters: __UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = neighborhood[index_of_best_solution] __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = False while not found: __UpperCAmelCase = 0 while i < len(snake_case_ ): if best_solution[i] != solution[i]: __UpperCAmelCase = best_solution[i] __UpperCAmelCase = solution[i] break __UpperCAmelCase = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __UpperCAmelCase = True __UpperCAmelCase = best_solution[:-1] __UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __UpperCAmelCase = cost __UpperCAmelCase = solution else: __UpperCAmelCase = index_of_best_solution + 1 __UpperCAmelCase = neighborhood[index_of_best_solution] if len(snake_case_ ) >= size: tabu_list.pop(0 ) __UpperCAmelCase = count + 1 return best_solution_ever, best_cost def lowercase__ ( snake_case_ :str=None ): __UpperCAmelCase = generate_neighbours(args.File ) __UpperCAmelCase , __UpperCAmelCase = generate_first_solution( args.File , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = tabu_search( snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
332
0
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _a ( _lowerCAmelCase ): """simple docstring""" _lowerCamelCase : List[Any] = ["image_processor", "tokenizer"] _lowerCamelCase : Optional[Any] = "FlavaImageProcessor" _lowerCamelCase : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Any , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[Any]=None , **UpperCAmelCase : Optional[int] ): A_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _lowercase , ) A_ = kwargs.pop("feature_extractor" ) A_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_lowercase , _lowercase ) A_ = self.image_processor def __call__( self : int , UpperCAmelCase : Optional[ImageInput] = None , UpperCAmelCase : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ): if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: A_ = self.tokenizer( text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , ) if images is not None: A_ = self.image_processor( _lowercase , return_image_mask=_lowercase , return_codebook_pixels=_lowercase , return_tensors=_lowercase , **_lowercase , ) if text is not None and images is not None: encoding.update(_lowercase ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase ) def __A ( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ): return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def __A ( self : List[str] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Tuple ): return self.tokenizer.decode(*_lowercase , **_lowercase ) @property def __A ( self : List[Any] ): A_ = self.tokenizer.model_input_names A_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __A ( self : str ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _lowercase , ) return self.image_processor_class @property def __A ( self : int ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _lowercase , ) return self.image_processor
312
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowercase__ ( snake_case_ :ndarray ): return np.dot(snake_case_ , snake_case_ ) class _UpperCAmelCase : def __init__( self : Union[str, Any] , *, _lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ): __UpperCAmelCase = regularization __UpperCAmelCase = gamma if kernel == "linear": __UpperCAmelCase = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('''gamma must be float or int''' ) if not self.gamma > 0: raise ValueError('''gamma must be > 0''' ) __UpperCAmelCase = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: __UpperCAmelCase = F'''Unknown kernel: {kernel}''' raise ValueError(_lowercase ) def a ( self : Dict , _lowercase : ndarray , _lowercase : ndarray ): return np.dot(_lowercase , _lowercase ) def a ( self : Any , _lowercase : ndarray , _lowercase : ndarray ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def a ( self : Union[str, Any] , _lowercase : list[ndarray] , _lowercase : ndarray ): __UpperCAmelCase = observations __UpperCAmelCase = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((__UpperCAmelCase) , ) = np.shape(_lowercase ) def to_minimize(_lowercase : ndarray ) -> float: __UpperCAmelCase = 0 ((__UpperCAmelCase) , ) = np.shape(_lowercase ) for i in range(_lowercase ): for j in range(_lowercase ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(_lowercase ) __UpperCAmelCase = LinearConstraint(_lowercase , 0 , 0 ) __UpperCAmelCase = Bounds(0 , self.regularization ) __UpperCAmelCase = minimize( _lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x __UpperCAmelCase = l_star # calculating mean offset of separation plane to points __UpperCAmelCase = 0 for i in range(_lowercase ): for j in range(_lowercase ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) __UpperCAmelCase = s / n def a ( self : List[Any] , _lowercase : ndarray ): __UpperCAmelCase = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , _lowercase ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
332
0
'''simple docstring''' def __snake_case( _lowerCAmelCase ) -> str: snake_case__ : List[str] = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def __snake_case( _lowerCAmelCase = 100 ) -> Union[str, Any]: snake_case__ : str = 1 snake_case__ : Dict = 2 for i in range(2 , max_n + 1 ): snake_case__ : Dict = pre_numerator snake_case__ : str = 2 * i // 3 if i % 3 == 0 else 1 snake_case__ : Dict = cur_numerator snake_case__ : Union[str, Any] = e_cont * pre_numerator + temp return sum_digits(snake_case_ ) if __name__ == "__main__": print(F"{solution() = }")
35
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowercase : int = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
332
0
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowercase : int = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any) -> List[str]: '''simple docstring''' __UpperCamelCase : Optional[Any] = R"\w+[.]\d+" __UpperCamelCase : Optional[Any] = re.findall(snake_case_ , snake_case_) for pat in pats: __UpperCamelCase : str = key.replace(snake_case_ , "_".join(pat.split("."))) return key def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : List[str]) -> Optional[Any]: '''simple docstring''' __UpperCamelCase : int = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): __UpperCamelCase : List[str] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: __UpperCamelCase : List[str] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: __UpperCamelCase : str = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer __UpperCamelCase : Any = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: __UpperCamelCase : List[str] = pt_tensor.transpose(2 , 3 , 1 , 0) return renamed_pt_tuple_key, pt_tensor # linear layer __UpperCamelCase : Tuple = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": __UpperCamelCase : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight __UpperCamelCase : List[Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias __UpperCamelCase : Optional[Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : int=42) -> Optional[int]: '''simple docstring''' __UpperCamelCase : str = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params __UpperCamelCase : List[Any] = flax_model.init_weights(PRNGKey(snake_case_)) __UpperCamelCase : Dict = flatten_dict(snake_case_) __UpperCamelCase : Optional[int] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): __UpperCamelCase : int = rename_key(snake_case_) __UpperCamelCase : List[str] = tuple(renamed_pt_key.split(".")) # Correctly rename weight parameters __UpperCamelCase , __UpperCamelCase : int = rename_key_and_reshape_tensor(snake_case_ , snake_case_ , snake_case_) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.') # also add unexpected weight so that warning is thrown __UpperCamelCase : str = jnp.asarray(snake_case_) return unflatten_dict(snake_case_)
232
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self : Tuple , _lowercase : str , _lowercase : str ): __UpperCAmelCase , __UpperCAmelCase = text, pattern __UpperCAmelCase , __UpperCAmelCase = len(_lowercase ), len(_lowercase ) def a ( self : Optional[int] , _lowercase : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def a ( self : int , _lowercase : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def a ( self : Optional[Any] ): # searches pattern in text and returns index positions __UpperCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): __UpperCAmelCase = self.mismatch_in_text(_lowercase ) if mismatch_index == -1: positions.append(_lowercase ) else: __UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) __UpperCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _lowercase : str = 'ABAABA' _lowercase : Tuple = 'AB' _lowercase : Dict = BoyerMooreSearch(text, pattern) _lowercase : Any = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
332
0
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder _UpperCamelCase = datasets.utils.logging.get_logger(__name__) class __lowercase (folder_based_builder.FolderBasedBuilderConfig ): _UpperCamelCase = None _UpperCamelCase = None class __lowercase (folder_based_builder.FolderBasedBuilder ): _UpperCamelCase = datasets.Audio() _UpperCamelCase = "audio" _UpperCamelCase = AudioFolderConfig _UpperCamelCase = 42 # definition at the bottom of the script _UpperCamelCase = AudioClassification(audio_column="""audio""" , label_column="""label""" ) _UpperCamelCase = [ '.aiff', '.au', '.avr', '.caf', '.flac', '.htk', '.svx', '.mat4', '.mat5', '.mpc2k', '.ogg', '.paf', '.pvf', '.raw', '.rf64', '.sd2', '.sds', '.ircam', '.voc', '.w64', '.wav', '.nist', '.wavex', '.wve', '.xi', '.mp3', '.opus', ] _UpperCamelCase = AUDIO_EXTENSIONS
275
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _UpperCAmelCase : a__ : int a__ : Node | None = None a__ : Node | None = None def lowercase__ ( ): __UpperCAmelCase = Node(1 ) __UpperCAmelCase = Node(2 ) __UpperCAmelCase = Node(3 ) __UpperCAmelCase = Node(4 ) __UpperCAmelCase = Node(5 ) return tree def lowercase__ ( snake_case_ :Node | None ): return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowercase__ ( snake_case_ :Node | None ): return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowercase__ ( snake_case_ :Node | None ): __UpperCAmelCase = [] if root is None: return output __UpperCAmelCase = deque([root] ) while process_queue: __UpperCAmelCase = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None ): if root is None: return [] __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = height(snake_case_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 1 else: output.append(get_nodes_from_right_to_left(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 0 return output def lowercase__ ( ): # Main function for testing. __UpperCAmelCase = make_tree() print(F'''In-order Traversal: {inorder(snake_case_ )}''' ) print(F'''Pre-order Traversal: {preorder(snake_case_ )}''' ) print(F'''Post-order Traversal: {postorder(snake_case_ )}''' , '''\n''' ) print(F'''Height of Tree: {height(snake_case_ )}''' , '''\n''' ) print('''Complete Level Order Traversal: ''' ) print(level_order(snake_case_ ) , '''\n''' ) print('''Level-wise order Traversal: ''' ) for level in range(1 , height(snake_case_ ) + 1 ): print(F'''Level {level}:''' , get_nodes_from_left_to_right(snake_case_ , level=snake_case_ ) ) print('''\nZigZag order Traversal: ''' ) print(zigzag(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
332
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'microsoft/swinv2-tiny-patch4-window8-256': ( 'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json' ), } class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ): snake_case_ = "swinv2" snake_case_ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Any , __lowercase : List[Any]=2_24 , __lowercase : int=4 , __lowercase : Optional[int]=3 , __lowercase : Optional[Any]=96 , __lowercase : Optional[int]=[2, 2, 6, 2] , __lowercase : Optional[int]=[3, 6, 12, 24] , __lowercase : str=7 , __lowercase : Union[str, Any]=4.0 , __lowercase : List[str]=True , __lowercase : List[Any]=0.0 , __lowercase : Dict=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Union[str, Any]="gelu" , __lowercase : Tuple=False , __lowercase : Optional[int]=0.02 , __lowercase : List[Any]=1e-5 , __lowercase : Tuple=32 , **__lowercase : Optional[int] , ) -> Optional[int]: super().__init__(**_lowercase ) SCREAMING_SNAKE_CASE__ : Dict =image_size SCREAMING_SNAKE_CASE__ : int =patch_size SCREAMING_SNAKE_CASE__ : Optional[int] =num_channels SCREAMING_SNAKE_CASE__ : List[str] =embed_dim SCREAMING_SNAKE_CASE__ : Optional[int] =depths SCREAMING_SNAKE_CASE__ : Dict =len(_lowercase ) SCREAMING_SNAKE_CASE__ : str =num_heads SCREAMING_SNAKE_CASE__ : Any =window_size SCREAMING_SNAKE_CASE__ : List[Any] =mlp_ratio SCREAMING_SNAKE_CASE__ : Tuple =qkv_bias SCREAMING_SNAKE_CASE__ : List[str] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Dict =drop_path_rate SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] =use_absolute_embeddings SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps SCREAMING_SNAKE_CASE__ : List[str] =initializer_range SCREAMING_SNAKE_CASE__ : List[Any] =encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model SCREAMING_SNAKE_CASE__ : int =int(embed_dim * 2 ** (len(_lowercase ) - 1) ) SCREAMING_SNAKE_CASE__ : List[Any] =(0, 0, 0, 0)
152
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class _UpperCAmelCase ( unittest.TestCase ): @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) ) @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) )
332
0
"""simple docstring""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def _A (__a , __a , __a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = BertConfig.from_json_file(snake_case_ ) print(f'Building PyTorch model from configuration: {config}' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertForPreTraining(snake_case_ ) # Load weights from tf checkpoint load_tf_weights_in_bert(snake_case_ , snake_case_ , snake_case_ ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , snake_case_ ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase_ : Dict = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
91
"""simple docstring""" def lowercase__ ( snake_case_ :Union[str, Any] ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = max(snake_case_ ) __UpperCAmelCase = min(snake_case_ ) # create the counting array __UpperCAmelCase = coll_max + 1 - coll_min __UpperCAmelCase = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , snake_case_ ): __UpperCAmelCase = counting_arr[i] + counting_arr[i - 1] # create the output collection __UpperCAmelCase = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , snake_case_ ) ): __UpperCAmelCase = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowercase__ ( snake_case_ :str ): return "".join([chr(snake_case_ ) for i in counting_sort([ord(snake_case_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt" _lowercase : int = input('Enter numbers separated by a comma:\n').strip() _lowercase : int = [int(item) for item in user_input.split(',')] print(counting_sort(unsorted))
332
0
"""simple docstring""" import os def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Optional[Any] = len(grid[0] ) __lowerCAmelCase : Optional[Any] = len(snake_case_ ) __lowerCAmelCase : Optional[int] = 0 __lowerCAmelCase : List[str] = 0 __lowerCAmelCase : Union[str, Any] = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(snake_case_ ): for j in range(n_rows - 3 ): __lowerCAmelCase : int = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] __lowerCAmelCase : Optional[int] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: __lowerCAmelCase : Union[str, Any] = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: __lowerCAmelCase : str = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) __lowerCAmelCase : List[str] = max( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) if max_product > largest: __lowerCAmelCase : Any = max_product return largest def __lowerCAmelCase (): __lowerCAmelCase : Optional[int] = [] with open(os.path.dirname(snake_case_ ) + '/grid.txt' ) as file: for line in file: grid.append(line.strip('\n' ).split(' ' ) ) __lowerCAmelCase : Union[str, Any] = [[int(snake_case_ ) for i in grid[j]] for j in range(len(snake_case_ ) )] return largest_product(snake_case_ ) if __name__ == "__main__": print(solution())
86
"""simple docstring""" from collections import defaultdict def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = first_str.lower().strip() __UpperCAmelCase = second_str.lower().strip() # Remove whitespace __UpperCAmelCase = first_str.replace(''' ''' , '''''' ) __UpperCAmelCase = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(snake_case_ ) != len(snake_case_ ): return False # Default values for count should be 0 __UpperCAmelCase = defaultdict(snake_case_ ) # For each character in input strings, # increment count in the corresponding for i in range(len(snake_case_ ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() _lowercase : List[Any] = input('Enter the first string ').strip() _lowercase : Tuple = input('Enter the second string ').strip() _lowercase : str = check_anagrams(input_a, input_b) print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
332
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCamelCase__ = logging.get_logger(__name__) class A ( _lowerCAmelCase ): __UpperCAmelCase : str = ["pixel_values"] def __init__(self : str , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = PIL.Image.BICUBIC , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : Union[int, float] = 1 / 2_5_5 , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , **__UpperCAmelCase : int , ) -> Optional[int]: """simple docstring""" super().__init__(**_lowercase ) UpperCAmelCase__ = size if size is not None else {"height": 2_5_6, "width": 2_5_6} UpperCAmelCase__ = get_size_dict(_lowercase ) UpperCAmelCase__ = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} UpperCAmelCase__ = get_size_dict(_lowercase , param_name="crop_size" ) UpperCAmelCase__ = do_resize UpperCAmelCase__ = size UpperCAmelCase__ = resample UpperCAmelCase__ = do_center_crop UpperCAmelCase__ = crop_size UpperCAmelCase__ = do_rescale UpperCAmelCase__ = rescale_factor UpperCAmelCase__ = do_normalize UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase_ (self : Tuple , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : PILImageResampling = PIL.Image.BICUBIC , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Union[str, Any] , ) -> List[str]: """simple docstring""" UpperCAmelCase__ = get_size_dict(_lowercase ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}""" ) return resize( _lowercase , size=(size["height"], size["width"]) , resample=_lowercase , data_format=_lowercase , **_lowercase ) def lowercase_ (self : List[str] , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : List[Any] , ) -> Any: """simple docstring""" UpperCAmelCase__ = get_size_dict(_lowercase ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}""" ) return center_crop(_lowercase , size=(size["height"], size["width"]) , data_format=_lowercase , **_lowercase ) def lowercase_ (self : List[Any] , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[int, float] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Any , ) -> int: """simple docstring""" return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def lowercase_ (self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Tuple , ) -> Tuple: """simple docstring""" return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : ImageInput , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : float = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCAmelCase : List[Any] , ) -> List[str]: """simple docstring""" UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ = resample if resample is not None else self.resample UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ = image_std if image_std is not None else self.image_std UpperCAmelCase__ = size if size is not None else self.size UpperCAmelCase__ = get_size_dict(_lowercase ) UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ = get_size_dict(_lowercase , param_name="crop_size" ) UpperCAmelCase__ = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. UpperCAmelCase__ = [to_numpy_array(_lowercase ) for image in images] if do_resize: UpperCAmelCase__ = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images] if do_center_crop: UpperCAmelCase__ = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images] if do_rescale: UpperCAmelCase__ = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images] if do_normalize: UpperCAmelCase__ = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images] UpperCAmelCase__ = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] UpperCAmelCase__ = {"pixel_values": images} return BatchFeature(data=_lowercase , tensor_type=_lowercase )
65
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict , _lowercase : Union[str, Any] ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __UpperCAmelCase = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowercase ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : List[str] ): __UpperCAmelCase = '''sgugger/tiny-distilbert-classification''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) # set architectures equal to `None` __UpperCAmelCase = None __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Tuple ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Any ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Union[str, Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() ) def a ( self : List[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowercase : str ): self.assertTrue(hasattr(_lowercase , '''sequential''' ) ) self.assertTrue(hasattr(_lowercase , '''cumulative''' ) ) self.assertTrue(hasattr(_lowercase , '''current''' ) ) self.assertTrue(hasattr(_lowercase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
332
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a = { 'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'], 'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['VisionTextDualEncoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['FlaxVisionTextDualEncoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['TFVisionTextDualEncoderModel'] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure)
155
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _UpperCAmelCase ( _lowerCAmelCase ): def a ( self : Tuple , _lowercase : Dict=None , _lowercase : str=None , _lowercase : Union[str, Any]=None , **_lowercase : Tuple ): if tokenize_kwargs is None: __UpperCAmelCase = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __UpperCAmelCase = truncation __UpperCAmelCase = tokenize_kwargs __UpperCAmelCase = {} if return_tensors is not None: __UpperCAmelCase = return_tensors return preprocess_params, {}, postprocess_params def a ( self : int , _lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): __UpperCAmelCase = self.framework __UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase ) return model_inputs def a ( self : List[str] , _lowercase : Tuple ): __UpperCAmelCase = self.model(**_lowercase ) return model_outputs def a ( self : int , _lowercase : Tuple , _lowercase : str=False ): # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[Any] , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): return super().__call__(*_lowercase , **_lowercase )
332
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class a ( metaclass=_lowerCAmelCase ): UpperCAmelCase_ : Tuple =["torch", "scipy"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ): requires_backends(self , ['torch', 'scipy'] ) @classmethod def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ): requires_backends(cls , ['torch', 'scipy'] ) @classmethod def UpperCamelCase_ ( cls , *_lowerCamelCase , **_lowerCamelCase ): requires_backends(cls , ['torch', 'scipy'] )
220
"""simple docstring""" from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor _lowercase : Union[str, Any] = transforms.Compose( [ transforms.Resize((2_56, 2_56)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowercase__ ( snake_case_ :List[Any] ): if isinstance(snake_case_ , torch.Tensor ): return image elif isinstance(snake_case_ , PIL.Image.Image ): __UpperCAmelCase = [image] __UpperCAmelCase = [trans(img.convert('''RGB''' ) ) for img in image] __UpperCAmelCase = torch.stack(snake_case_ ) return image class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Any , _lowercase : str , _lowercase : str ): super().__init__() # make sure scheduler can always be converted to DDIM __UpperCAmelCase = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_lowercase , scheduler=_lowercase ) def a ( self : int , _lowercase : List[str] ): if strength < 0 or strength > 1: raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def a ( self : List[Any] , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : int ): # get the original timestep using init_timestep __UpperCAmelCase = min(int(num_inference_steps * strength ) , _lowercase ) __UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) __UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def a ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : Optional[int]=None ): if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}''' ) __UpperCAmelCase = image.to(device=_lowercase , dtype=_lowercase ) if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) __UpperCAmelCase = init_latents.shape __UpperCAmelCase = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase ) # get latents print('''add noise to latents at timestep''' , _lowercase ) __UpperCAmelCase = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = init_latents return latents @torch.no_grad() def __call__( self : Any , _lowercase : Union[torch.FloatTensor, PIL.Image.Image] = None , _lowercase : float = 0.8 , _lowercase : int = 1 , _lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase : float = 0.0 , _lowercase : int = 50 , _lowercase : Optional[bool] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , ): self.check_inputs(_lowercase ) # 2. Preprocess image __UpperCAmelCase = preprocess(_lowercase ) # 3. set timesteps self.scheduler.set_timesteps(_lowercase , device=self.device ) __UpperCAmelCase , __UpperCAmelCase = self.get_timesteps(_lowercase , _lowercase , self.device ) __UpperCAmelCase = timesteps[:1].repeat(_lowercase ) # 4. Prepare latent variables __UpperCAmelCase = self.prepare_latents(_lowercase , _lowercase , _lowercase , self.unet.dtype , self.device , _lowercase ) __UpperCAmelCase = latents # 5. Denoising loop for t in self.progress_bar(_lowercase ): # 1. predict noise model_output __UpperCAmelCase = self.unet(_lowercase , _lowercase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __UpperCAmelCase = self.scheduler.step( _lowercase , _lowercase , _lowercase , eta=_lowercase , use_clipped_model_output=_lowercase , generator=_lowercase , ).prev_sample __UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __UpperCAmelCase = self.numpy_to_pil(_lowercase ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=_lowercase )
332
0
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
196
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowercase : Union[str, Any] = { 'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ 'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel', 'ResNetBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ 'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = [ 'FlaxResNetForImageClassification', 'FlaxResNetModel', 'FlaxResNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys _lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
332
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :Optional[Any] = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :int = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys __a :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
"""simple docstring""" _lowercase : Any = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowercase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowercase : int = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
332
0
'''simple docstring''' class UpperCAmelCase_ ( _lowerCAmelCase ): """simple docstring""" pass class UpperCAmelCase_ ( _lowerCAmelCase ): """simple docstring""" pass class UpperCAmelCase_ : """simple docstring""" def __init__( self : Any ): snake_case__ : Dict = [ [], [], [], ] def lowerCamelCase ( self : Tuple , snake_case_ : int , snake_case_ : int ): try: if len(self.queues[priority] ) >= 100: raise OverflowError("""Maximum queue size is 100""" ) self.queues[priority].append(_lowercase ) except IndexError: raise ValueError("""Valid priorities are 0, 1, and 2""" ) def lowerCamelCase ( self : Optional[Any] ): for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError("""All queues are empty""" ) def __str__( self : Optional[Any] ): return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) ) class UpperCAmelCase_ : """simple docstring""" def __init__( self : int ): snake_case__ : str = [] def lowerCamelCase ( self : Dict , snake_case_ : int ): if len(self.queue ) == 100: raise OverFlowError("""Maximum queue size is 100""" ) self.queue.append(_lowercase ) def lowerCamelCase ( self : Optional[int] ): if not self.queue: raise UnderFlowError("""The queue is empty""" ) else: snake_case__ : Union[str, Any] = min(self.queue ) self.queue.remove(_lowercase ) return data def __str__( self : List[str] ): return str(self.queue ) def __snake_case( ) -> str: snake_case__ : List[Any] = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(snake_case_ ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(snake_case_ ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def __snake_case( ) -> List[Any]: snake_case__ : Optional[int] = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(snake_case_ ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(snake_case_ ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
35
"""simple docstring""" import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def lowercase__ ( snake_case_ :Optional[int] ): return EnvironmentCommand() def lowercase__ ( snake_case_ :List[str] ): return EnvironmentCommand(args.accelerate_config_file ) class _UpperCAmelCase ( _lowerCAmelCase ): @staticmethod def a ( _lowercase : ArgumentParser ): __UpperCAmelCase = parser.add_parser('''env''' ) download_parser.set_defaults(func=_lowercase ) download_parser.add_argument( '''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=_lowercase ) def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ): __UpperCAmelCase = accelerate_config_file def a ( self : Dict ): __UpperCAmelCase = '''not installed''' if is_safetensors_available(): import safetensors __UpperCAmelCase = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors __UpperCAmelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = __UpperCAmelCase = '''not found''' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __UpperCAmelCase = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(_lowercase ): __UpperCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict() __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(_lowercase , _lowercase ) else F'''\t{accelerate_config}''' ) __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_torch_available(): import torch __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_tf_available(): import tensorflow as tf __UpperCAmelCase = tf.__version__ try: # deprecated in v2.1 __UpperCAmelCase = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __UpperCAmelCase = bool(tf.config.list_physical_devices('''GPU''' ) ) __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_flax_available(): import flax import jax import jaxlib __UpperCAmelCase = flax.__version__ __UpperCAmelCase = jax.__version__ __UpperCAmelCase = jaxlib.__version__ __UpperCAmelCase = jax.lib.xla_bridge.get_backend().platform __UpperCAmelCase = { '''`transformers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Huggingface_hub version''': huggingface_hub.__version__, '''Safetensors version''': F'''{safetensors_version}''', '''Accelerate version''': F'''{accelerate_version}''', '''Accelerate config''': F'''{accelerate_config_str}''', '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''', '''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''', '''Jax version''': F'''{jax_version}''', '''JaxLib version''': F'''{jaxlib_version}''', '''Using GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(_lowercase ) ) return info @staticmethod def a ( _lowercase : str ): return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
332
0
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' _A = ["image_processor", "tokenizer"] _A = "LayoutLMv3ImageProcessor" _A = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self :str , a :Union[str, Any]=None , a :Union[str, Any]=None , **a :Optional[Any] ) -> int: __UpperCamelCase : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _lowercase , ) __UpperCamelCase : Dict = kwargs.pop("feature_extractor" ) __UpperCamelCase : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_lowercase , _lowercase ) def __call__( self :Union[str, Any] , a :str , a :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a :Union[List[List[int]], List[List[List[int]]]] = None , a :Optional[Union[List[int], List[List[int]]]] = None , a :bool = True , a :Union[bool, str, PaddingStrategy] = False , a :Union[bool, str, TruncationStrategy] = None , a :Optional[int] = None , a :int = 0 , a :Optional[int] = None , a :Optional[bool] = None , a :Optional[bool] = None , a :bool = False , a :bool = False , a :bool = False , a :bool = False , a :bool = True , a :Optional[Union[str, TensorType]] = None , **a :Any , ) -> Optional[int]: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." ) # first, apply the image processor __UpperCamelCase : str = self.image_processor(images=_lowercase , return_tensors=_lowercase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_lowercase , _lowercase ): __UpperCamelCase : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) __UpperCamelCase : int = features["words"] __UpperCamelCase : Union[str, Any] = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , ) # add pixel values __UpperCamelCase : List[Any] = features.pop("pixel_values" ) if return_overflowing_tokens is True: __UpperCamelCase : Dict = self.get_overflowing_images(_lowercase , encoded_inputs["overflow_to_sample_mapping"] ) __UpperCamelCase : Optional[Any] = images return encoded_inputs def _lowerCamelCase ( self :int , a :Optional[int] , a :Optional[int] ) -> List[str]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image __UpperCamelCase : Tuple = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_lowercase ) != len(_lowercase ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f' {len(_lowercase )} and {len(_lowercase )}' ) return images_with_overflow def _lowerCamelCase ( self :Any , *a :int , **a :List[Any] ) -> Dict: return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def _lowerCamelCase ( self :Union[str, Any] , *a :Dict , **a :Dict ) -> List[str]: return self.tokenizer.decode(*_lowercase , **_lowercase ) @property def _lowerCamelCase ( self :Union[str, Any] ) -> List[Any]: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _lowerCamelCase ( self :Dict ) -> Dict: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _lowercase , ) return self.image_processor_class @property def _lowerCamelCase ( self :Dict ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _lowercase , ) return self.image_processor
232
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list[float] , snake_case_ :list[float] ): __UpperCAmelCase = sorted(numsa + numsa ) __UpperCAmelCase , __UpperCAmelCase = divmod(len(snake_case_ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowercase : int = [float(x) for x in input('Enter the elements of first array: ').split()] _lowercase : Tuple = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
332
0
from __future__ import annotations import time import numpy as np _UpperCamelCase = [8, 5, 9, 7] _UpperCamelCase = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _UpperCamelCase = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class __lowercase : def __init__( self , A_ , A_ , A_ , ) ->Dict: '''simple docstring''' __lowerCAmelCase : Dict = claim_vector __lowerCAmelCase : str = allocated_resources_table __lowerCAmelCase : Tuple = maximum_claim_table def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(_lowercase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return {self.__need().index(_lowercase ): i for i in self.__need()} def UpperCamelCase__ ( self , **A_ ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Dict = self.__need() __lowerCAmelCase : str = self.__allocated_resources_table __lowerCAmelCase : Any = self.__available_resources() __lowerCAmelCase : List[str] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 50 + '''\n''' ) while need_list: __lowerCAmelCase : Optional[int] = False for each_need in need_list: __lowerCAmelCase : Tuple = True for index, need in enumerate(_lowercase ): if need > available_resources[index]: __lowerCAmelCase : List[str] = False break if execution: __lowerCAmelCase : str = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __lowerCAmelCase : str = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(_lowercase ) # update available/freed resources stack __lowerCAmelCase : str = np.array(_lowercase ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(_lowercase ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(_lowercase ) + 1}""" + ''' '''.join(f"""{it:>8}""" for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(_lowercase ) + 1}""" + ''' '''.join(f"""{it:>8}""" for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(_lowercase ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(_lowercase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
275
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class _UpperCAmelCase : def __init__( self : Union[str, Any] , _lowercase : Optional[Any] ): __UpperCAmelCase = str(id_ ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = [] __UpperCAmelCase = {} # {vertex:distance} def __lt__( self : str , _lowercase : List[Any] ): return self.key < other.key def __repr__( self : int ): return self.id def a ( self : Union[str, Any] , _lowercase : int ): self.neighbors.append(_lowercase ) def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : int ): __UpperCAmelCase = weight def lowercase__ ( snake_case_ :int , snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , snake_case_ ) graph[b - 1].add_edge(graph[a - 1] , snake_case_ ) def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ): __UpperCAmelCase = [] for u in graph: __UpperCAmelCase = math.inf __UpperCAmelCase = None __UpperCAmelCase = 0 __UpperCAmelCase = graph[:] while q: __UpperCAmelCase = min(snake_case_ ) q.remove(snake_case_ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): __UpperCAmelCase = u __UpperCAmelCase = u.edges[v.id] for i in range(1 , len(snake_case_ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ): for u in graph: __UpperCAmelCase = math.inf __UpperCAmelCase = None __UpperCAmelCase = 0 __UpperCAmelCase = list(snake_case_ ) hq.heapify(snake_case_ ) while h: __UpperCAmelCase = hq.heappop(snake_case_ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): __UpperCAmelCase = u __UpperCAmelCase = u.edges[v.id] hq.heapify(snake_case_ ) for i in range(1 , len(snake_case_ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def lowercase__ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
332
0
'''simple docstring''' from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge a_ = [ 'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the' ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe' ' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.', 'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal' ' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s' ' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the' ' body.', 'Amnesty International releases its annual report on the death penalty. The report catalogs the use of' ' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the' ' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital' ' punishment.', ] a_ = [ 'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .' ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz' ' had informed his Lufthansa training school of an episode of severe depression, airline says .', 'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .' ' Israel and the United States opposed the move, which could open the door to war crimes investigations against' ' Israelis .', 'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to' ' death . Organization claims that governments around the world are using the threat of terrorism to advance' ' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death' ' sentences up by 28% .', ] def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =calculate_rouge(snake_case_, snake_case_, bootstrap_aggregation=snake_case_, rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(snake_case_, snake_case_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =calculate_rouge(snake_case_, snake_case_, bootstrap_aggregation=snake_case_, rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] ='''rougeLsum''' SCREAMING_SNAKE_CASE__ : Tuple =calculate_rouge(snake_case_, snake_case_, newline_sep=snake_case_, rouge_keys=[k] )[k] SCREAMING_SNAKE_CASE__ : Any =calculate_rouge(snake_case_, snake_case_, newline_sep=snake_case_, rouge_keys=[k] )[k] assert score > score_no_sep def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] =['''rouge1''', '''rouge2''', '''rougeL'''] SCREAMING_SNAKE_CASE__ : Dict =calculate_rouge(snake_case_, snake_case_, newline_sep=snake_case_, rouge_keys=snake_case_ ) SCREAMING_SNAKE_CASE__ : Dict =calculate_rouge(snake_case_, snake_case_, newline_sep=snake_case_, rouge_keys=snake_case_ ) assert score_sep == score_no_sep def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str =[ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] SCREAMING_SNAKE_CASE__ : Optional[int] =[ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(snake_case_, snake_case_, newline_sep=snake_case_ ) == calculate_rouge(snake_case_, snake_case_, newline_sep=snake_case_ ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str =[ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] SCREAMING_SNAKE_CASE__ : str =[ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] SCREAMING_SNAKE_CASE__ : Union[str, Any] =calculate_rouge(snake_case_, snake_case_, rouge_keys=['''rougeLsum'''], newline_sep=snake_case_ )['''rougeLsum'''] SCREAMING_SNAKE_CASE__ : List[str] =calculate_rouge(snake_case_, snake_case_, rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =Path('''examples/seq2seq/test_data/wmt_en_ro''' ) SCREAMING_SNAKE_CASE__ : List[Any] =calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) ) assert isinstance(snake_case_, snake_case_ ) SCREAMING_SNAKE_CASE__ : List[str] =calculate_rouge_path( data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=snake_case_ ) assert isinstance(snake_case_, snake_case_ )
152
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : str = logging.get_logger(__name__) _lowercase : Dict = { 'microsoft/swinv2-tiny-patch4-window8-256': ( 'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "swinv2" a__ : List[Any] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Any , _lowercase : List[Any]=2_24 , _lowercase : int=4 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=96 , _lowercase : Optional[int]=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : str=7 , _lowercase : Union[str, Any]=4.0 , _lowercase : List[str]=True , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=False , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-5 , _lowercase : Tuple=32 , **_lowercase : Optional[int] , ): super().__init__(**_lowercase ) __UpperCAmelCase = image_size __UpperCAmelCase = patch_size __UpperCAmelCase = num_channels __UpperCAmelCase = embed_dim __UpperCAmelCase = depths __UpperCAmelCase = len(_lowercase ) __UpperCAmelCase = num_heads __UpperCAmelCase = window_size __UpperCAmelCase = mlp_ratio __UpperCAmelCase = qkv_bias __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = drop_path_rate __UpperCAmelCase = hidden_act __UpperCAmelCase = use_absolute_embeddings __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = initializer_range __UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) ) __UpperCAmelCase = (0, 0, 0, 0)
332
0
"""simple docstring""" import numpy as np def _A (__a ) -> Any: """simple docstring""" return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" import pprint import requests _lowercase : Optional[Any] = 'https://zenquotes.io/api' def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": _lowercase : int = random_quotes() pprint.pprint(response)
332
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class A__ ( _lowerCAmelCase): A_ : Union[str, Any] = "ibert" def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="none" , **_SCREAMING_SNAKE_CASE , ): super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) __lowerCAmelCase : List[Any] = vocab_size __lowerCAmelCase : Union[str, Any] = hidden_size __lowerCAmelCase : Optional[Any] = num_hidden_layers __lowerCAmelCase : Optional[Any] = num_attention_heads __lowerCAmelCase : Union[str, Any] = hidden_act __lowerCAmelCase : Optional[Any] = intermediate_size __lowerCAmelCase : int = hidden_dropout_prob __lowerCAmelCase : Tuple = attention_probs_dropout_prob __lowerCAmelCase : Optional[int] = max_position_embeddings __lowerCAmelCase : Tuple = type_vocab_size __lowerCAmelCase : Dict = initializer_range __lowerCAmelCase : List[Any] = layer_norm_eps __lowerCAmelCase : Tuple = position_embedding_type __lowerCAmelCase : int = quant_mode __lowerCAmelCase : List[str] = force_dequant class A__ ( _lowerCAmelCase): @property def __lowerCamelCase ( self ): if self.task == "multiple-choice": __lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __lowerCAmelCase : str = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
86
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[str] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :Union[tf.Tensor, np.ndarray] ): if isinstance(snake_case_ , np.ndarray ): return list(tensor.shape ) __UpperCAmelCase = tf.shape(snake_case_ ) if tensor.shape == tf.TensorShape(snake_case_ ): return dynamic __UpperCAmelCase = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(snake_case_ )] def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :Optional[int] = None , snake_case_ :Optional[str] = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=snake_case_ , name=snake_case_ ) def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :Union[str, Any]=1E-5 , snake_case_ :List[str]=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case_ , snake_case_ ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized __UpperCAmelCase , __UpperCAmelCase = tf.nn.moments(snake_case_ , axes=[axis] , keepdims=snake_case_ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __UpperCAmelCase = [1] * inputs.shape.rank __UpperCAmelCase = shape_list(snake_case_ )[axis] __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) # Compute layer normalization using the batch_normalization # function. __UpperCAmelCase = tf.nn.batch_normalization( snake_case_ , snake_case_ , snake_case_ , offset=snake_case_ , scale=snake_case_ , variance_epsilon=snake_case_ , ) return outputs def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :List[str]=0 , snake_case_ :Optional[Any]=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __UpperCAmelCase = tf.shape(snake_case_ ) __UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :tf.Tensor ): if not isinstance(snake_case_ , tf.Tensor ): __UpperCAmelCase = tf.convert_to_tensor(snake_case_ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __UpperCAmelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __UpperCAmelCase = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __UpperCAmelCase = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :int , snake_case_ :str = "input_ids" ): tf.debugging.assert_less( snake_case_ , tf.cast(snake_case_ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case_ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any] , snake_case_ :List[str] ): __UpperCAmelCase = 64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __UpperCAmelCase = [x for x in data if len(snake_case_ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) __UpperCAmelCase = np.asarray(snake_case_ ) __UpperCAmelCase = 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(snake_case_ ): __UpperCAmelCase = chunk_data else: __UpperCAmelCase = data def lowercase__ ( snake_case_ :str , snake_case_ :List[str] ): if name in group.attrs: __UpperCAmelCase = [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs[name]] else: __UpperCAmelCase = [] __UpperCAmelCase = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase__ ( snake_case_ :Tuple ): def _expand_single_ad_tensor(snake_case_ :Optional[int] ): if isinstance(snake_case_ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(snake_case_ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , snake_case_ )
332
0
import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def lowerCAmelCase_ ( __A ) -> List[str]: '''simple docstring''' UpperCAmelCase__ = botoa.client("iam" ) UpperCAmelCase__ = { "Version": "2012-10-17", "Statement": [ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=snake_case_, AssumeRolePolicyDocument=json.dumps(snake_case_, indent=2 ) ) UpperCAmelCase__ = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sagemaker:*", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "ecr:GetAuthorizationToken", "cloudwatch:PutMetricData", "cloudwatch:GetMetricData", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:DescribeLogStreams", "logs:PutLogEvents", "logs:GetLogEvents", "s3:CreateBucket", "s3:ListBucket", "s3:GetBucketLocation", "s3:GetObject", "s3:PutObject", ], "Resource": "*", } ], } # attach policy to role iam_client.put_role_policy( RoleName=snake_case_, PolicyName=f"""{role_name}_policy_permission""", PolicyDocument=json.dumps(snake_case_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f"""role {role_name} already exists. Using existing one""" ) def lowerCAmelCase_ ( __A ) -> Dict: '''simple docstring''' UpperCAmelCase__ = botoa.client("iam" ) return iam_client.get_role(RoleName=snake_case_ )["Role"]["Arn"] def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase__ = _ask_options( "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], snake_case_, ) UpperCAmelCase__ = None if credentials_configuration == 0: UpperCAmelCase__ = _ask_field("Enter your AWS Profile name: [default] ", default="default" ) UpperCAmelCase__ = aws_profile else: print( "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" ) UpperCAmelCase__ = _ask_field("AWS Access Key ID: " ) UpperCAmelCase__ = aws_access_key_id UpperCAmelCase__ = _ask_field("AWS Secret Access Key: " ) UpperCAmelCase__ = aws_secret_access_key UpperCAmelCase__ = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1" ) UpperCAmelCase__ = aws_region UpperCAmelCase__ = _ask_options( "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], snake_case_, ) if role_management == 0: UpperCAmelCase__ = _ask_field("Enter your IAM role name: " ) else: UpperCAmelCase__ = "accelerate_sagemaker_execution_role" print(f"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" ) _create_iam_role_for_sagemaker(snake_case_ ) UpperCAmelCase__ = _ask_field( "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=snake_case_, error_message="Please enter yes or no.", ) UpperCAmelCase__ = None if is_custom_docker_image: UpperCAmelCase__ = _ask_field("Enter your Docker image: ", lambda __A : str(snake_case_ ).lower() ) UpperCAmelCase__ = _ask_field( "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=snake_case_, error_message="Please enter yes or no.", ) UpperCAmelCase__ = None if is_sagemaker_inputs_enabled: UpperCAmelCase__ = _ask_field( "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda __A : str(snake_case_ ).lower(), ) UpperCAmelCase__ = _ask_field( "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=snake_case_, error_message="Please enter yes or no.", ) UpperCAmelCase__ = None if is_sagemaker_metrics_enabled: UpperCAmelCase__ = _ask_field( "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda __A : str(snake_case_ ).lower(), ) UpperCAmelCase__ = _ask_options( "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode, ) UpperCAmelCase__ = {} UpperCAmelCase__ = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=snake_case_, error_message="Please enter yes or no.", ) if use_dynamo: UpperCAmelCase__ = "dynamo_" UpperCAmelCase__ = _ask_options( "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) UpperCAmelCase__ = _ask_field( "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=snake_case_, error_message="Please enter yes or no.", ) if use_custom_options: UpperCAmelCase__ = _ask_options( "Which mode do you want to use?", snake_case_, lambda __A : TORCH_DYNAMO_MODES[int(snake_case_ )], default="default", ) UpperCAmelCase__ = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=snake_case_, error_message="Please enter yes or no.", ) UpperCAmelCase__ = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=snake_case_, error_message="Please enter yes or no.", ) UpperCAmelCase__ = "Which EC2 instance type you want to use for your training?" if distributed_type != SageMakerDistributedType.NO: UpperCAmelCase__ = _ask_options( snake_case_, snake_case_, lambda __A : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(snake_case_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" UpperCAmelCase__ = _ask_field(snake_case_, lambda __A : str(snake_case_ ).lower(), default="ml.p3.2xlarge" ) UpperCAmelCase__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): UpperCAmelCase__ = _ask_field( "How many machines do you want use? [1]: ", snake_case_, default=1, ) UpperCAmelCase__ = _ask_options( "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." ) return SageMakerConfig( image_uri=snake_case_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=snake_case_, use_cpu=snake_case_, dynamo_config=snake_case_, eca_instance_type=snake_case_, profile=snake_case_, region=snake_case_, iam_role_name=snake_case_, mixed_precision=snake_case_, num_machines=snake_case_, sagemaker_inputs_file=snake_case_, sagemaker_metrics_file=snake_case_, )
65
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowercase__ ( snake_case_ :Union[str, Any]=None ): if subparsers is not None: __UpperCAmelCase = subparsers.add_parser('''env''' ) else: __UpperCAmelCase = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=snake_case_ , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=snake_case_ ) return parser def lowercase__ ( snake_case_ :List[Any] ): __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = is_xpu_available() __UpperCAmelCase = is_npu_available() __UpperCAmelCase = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(snake_case_ ): __UpperCAmelCase = load_config_from_file(args.config_file ).to_dict() __UpperCAmelCase = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''PyTorch XPU available''': str(snake_case_ ), '''PyTorch NPU available''': str(snake_case_ ), '''System RAM''': F'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''', } if pt_cuda_available: __UpperCAmelCase = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(snake_case_ , snake_case_ ) else F'''\t{accelerate_config}''' ) print(snake_case_ ) __UpperCAmelCase = accelerate_config return info def lowercase__ ( ): __UpperCAmelCase = env_command_parser() __UpperCAmelCase = parser.parse_args() env_command(snake_case_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
332
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
155
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 25_00_04 _lowercase : int = 25_00_20 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : Union[str, Any] = MBartaaTokenizer a__ : List[str] = MBartaaTokenizerFast a__ : Any = True a__ : List[str] = True def a ( self : str ): super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self : Dict ): __UpperCAmelCase = '''<s>''' __UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowercase ) , 10_54 ) def a ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def a ( self : str ): __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) __UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) __UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def a ( self : str ): # fmt: off __UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def a ( self : str ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=True __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=False __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): a__ : str = "facebook/mbart-large-50-one-to-many-mmt" a__ : Union[str, Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] a__ : Any = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] a__ : Any = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def a ( cls : Tuple ): __UpperCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) __UpperCAmelCase = 1 return cls def a ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) def a ( self : Optional[Any] ): self.assertIn(_lowercase , self.tokenizer.all_special_ids ) __UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) __UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase ) self.assertEqual(_lowercase , _lowercase ) self.assertNotIn(self.tokenizer.eos_token , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , _lowercase ) __UpperCAmelCase = 10 __UpperCAmelCase = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0] self.assertEqual(ids[0] , _lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_lowercase ) , _lowercase ) def a ( self : Optional[int] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowercase ) __UpperCAmelCase = MBartaaTokenizer.from_pretrained(_lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors='''pt''' ) __UpperCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors='''pt''' ) __UpperCAmelCase = targets['''input_ids'''] __UpperCAmelCase = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(_lowercase ) , { # en_XX, A, test, EOS '''input_ids''': [[25_00_04, 62, 30_34, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
332
0
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures') class a ( unittest.TestCase ): def UpperCamelCase_ ( self ): # A mock response for an HTTP head request to emulate server down lowercase = mock.Mock() lowercase = 5_0_0 lowercase = {} lowercase = HTTPError lowercase = {} # Download this model to make sure it's in the cache. lowercase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=_lowercase ) as mock_head: lowercase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): # This test is for deprecated behavior and can be removed in v5 lowercase = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class a ( unittest.TestCase ): @classmethod def UpperCamelCase_ ( cls ): lowercase = TOKEN HfFolder.save_token(_lowercase ) @classmethod def UpperCamelCase_ ( cls ): try: delete_repo(token=cls._token , repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def UpperCamelCase_ ( self ): lowercase = WavaVecaFeatureExtractor.from_pretrained(_lowercase ) feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token ) lowercase = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( _lowercase , repo_id='test-feature-extractor' , push_to_hub=_lowercase , use_auth_token=self._token ) lowercase = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) ) def UpperCamelCase_ ( self ): lowercase = WavaVecaFeatureExtractor.from_pretrained(_lowercase ) feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token ) lowercase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( _lowercase , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_lowercase , use_auth_token=self._token ) lowercase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) ) def UpperCamelCase_ ( self ): CustomFeatureExtractor.register_for_auto_class() lowercase = CustomFeatureExtractor.from_pretrained(_lowercase ) feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , ) lowercase = AutoFeatureExtractor.from_pretrained( F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=_lowercase ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
220
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase__ ( ): raise RuntimeError('''CUDA out of memory.''' ) class _UpperCAmelCase ( nn.Module ): def __init__( self : Optional[Any] ): super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def a ( self : Optional[int] , _lowercase : Optional[Any] ): return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) ) class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[int] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) def a ( self : Optional[int] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : str , _lowercase : List[str] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def a ( self : Tuple ): @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(_lowercase : Optional[int] ): pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : List[Any] ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : List[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : Union[str, Any] ): @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : str ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_lowercase ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def a ( self : Dict ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : int ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def a ( self : str ): __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _lowercase ) __UpperCAmelCase = release_memory(_lowercase ) self.assertEqual(torch.cuda.memory_allocated() , _lowercase )
332
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase = [ 'small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base', ] __lowerCAmelCase = { 'vocab_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json', 'funnel-transformer/small-base': ( 'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json' ), 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json', 'funnel-transformer/large-base': ( 'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json' ), 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase = {F'''funnel-transformer/{name}''': 5_12 for name in _model_names} __lowerCAmelCase = {F'''funnel-transformer/{name}''': {'do_lower_case': True} for name in _model_names} class __a ( _lowerCAmelCase ): __lowercase : Dict = VOCAB_FILES_NAMES __lowercase : int = PRETRAINED_VOCAB_FILES_MAP __lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION __lowercase : List[str] = FunnelTokenizer __lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : int = 2 def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<sep>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<cls>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__="##" , **lowerCAmelCase__ , ) -> int: '''simple docstring''' super().__init__( _lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , clean_text=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , wordpieces_prefix=_lowercase , **_lowercase , ) lowercase__: Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _lowercase ) != do_lower_case or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars ): lowercase__: Optional[Any] = getattr(_lowercase , normalizer_state.pop('type' ) ) lowercase__: Union[str, Any] = do_lower_case lowercase__: Any = strip_accents lowercase__: Union[str, Any] = tokenize_chinese_chars lowercase__: Optional[int] = normalizer_class(**_lowercase ) lowercase__: Union[str, Any] = do_lower_case def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]: '''simple docstring''' lowercase__: Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Any: '''simple docstring''' lowercase__: Union[str, Any] = [self.sep_token_id] lowercase__: List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[str]: '''simple docstring''' lowercase__: List[str] = self._tokenizer.model.save(_lowercase , name=_lowercase ) return tuple(_lowercase )
196
"""simple docstring""" import argparse import copy def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = {} with open(snake_case_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[1], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[0], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ): with open(snake_case_ ) as f: __UpperCAmelCase = f.read(1 ) __UpperCAmelCase = start_node __UpperCAmelCase = [] __UpperCAmelCase = start_node __UpperCAmelCase = 0 while visiting not in first_solution: __UpperCAmelCase = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution: __UpperCAmelCase = k[1] __UpperCAmelCase = k[0] first_solution.append(snake_case_ ) __UpperCAmelCase = distance_of_first_solution + int(snake_case_ ) __UpperCAmelCase = best_node first_solution.append(snake_case_ ) __UpperCAmelCase = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __UpperCAmelCase = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ): __UpperCAmelCase = [] for n in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) for kn in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) if n == kn: continue __UpperCAmelCase = copy.deepcopy(snake_case_ ) __UpperCAmelCase = kn __UpperCAmelCase = n __UpperCAmelCase = 0 for k in _tmp[:-1]: __UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __UpperCAmelCase = distance + int(i[1] ) _tmp.append(snake_case_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ): __UpperCAmelCase = 1 __UpperCAmelCase = first_solution __UpperCAmelCase = [] __UpperCAmelCase = distance_of_first_solution __UpperCAmelCase = solution while count <= iters: __UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = neighborhood[index_of_best_solution] __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = False while not found: __UpperCAmelCase = 0 while i < len(snake_case_ ): if best_solution[i] != solution[i]: __UpperCAmelCase = best_solution[i] __UpperCAmelCase = solution[i] break __UpperCAmelCase = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __UpperCAmelCase = True __UpperCAmelCase = best_solution[:-1] __UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __UpperCAmelCase = cost __UpperCAmelCase = solution else: __UpperCAmelCase = index_of_best_solution + 1 __UpperCAmelCase = neighborhood[index_of_best_solution] if len(snake_case_ ) >= size: tabu_list.pop(0 ) __UpperCAmelCase = count + 1 return best_solution_ever, best_cost def lowercase__ ( snake_case_ :str=None ): __UpperCAmelCase = generate_neighbours(args.File ) __UpperCAmelCase , __UpperCAmelCase = generate_first_solution( args.File , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = tabu_search( snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
332
0
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ): """simple docstring""" while second != 0: A_ = first & second first ^= second A_ = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() __a :int = int(input('Enter the first number: ').strip()) __a :Tuple = int(input('Enter the second number: ').strip()) print(F"{add(first, second) = }")
312
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowercase__ ( snake_case_ :ndarray ): return np.dot(snake_case_ , snake_case_ ) class _UpperCAmelCase : def __init__( self : Union[str, Any] , *, _lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ): __UpperCAmelCase = regularization __UpperCAmelCase = gamma if kernel == "linear": __UpperCAmelCase = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('''gamma must be float or int''' ) if not self.gamma > 0: raise ValueError('''gamma must be > 0''' ) __UpperCAmelCase = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: __UpperCAmelCase = F'''Unknown kernel: {kernel}''' raise ValueError(_lowercase ) def a ( self : Dict , _lowercase : ndarray , _lowercase : ndarray ): return np.dot(_lowercase , _lowercase ) def a ( self : Any , _lowercase : ndarray , _lowercase : ndarray ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def a ( self : Union[str, Any] , _lowercase : list[ndarray] , _lowercase : ndarray ): __UpperCAmelCase = observations __UpperCAmelCase = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((__UpperCAmelCase) , ) = np.shape(_lowercase ) def to_minimize(_lowercase : ndarray ) -> float: __UpperCAmelCase = 0 ((__UpperCAmelCase) , ) = np.shape(_lowercase ) for i in range(_lowercase ): for j in range(_lowercase ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(_lowercase ) __UpperCAmelCase = LinearConstraint(_lowercase , 0 , 0 ) __UpperCAmelCase = Bounds(0 , self.regularization ) __UpperCAmelCase = minimize( _lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x __UpperCAmelCase = l_star # calculating mean offset of separation plane to points __UpperCAmelCase = 0 for i in range(_lowercase ): for j in range(_lowercase ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) __UpperCAmelCase = s / n def a ( self : List[Any] , _lowercase : ndarray ): __UpperCAmelCase = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , _lowercase ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
332
0
'''simple docstring''' __a = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' __a = [{'type': 'code', 'content': INSTALL_CONTENT}] __a = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
35
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowercase : int = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
332
0
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' _A = "" _A = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _A = None # compression type in fsspec. ex: "gzip" _A = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self :str , a :str = "" , a :Optional[str] = None , a :Optional[dict] = None , **a :Optional[int] ) -> Union[str, Any]: super().__init__(self , **_lowercase ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode __UpperCamelCase : Optional[Any] = fsspec.open( _lowercase , mode="rb" , protocol=_lowercase , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) __UpperCamelCase : int = os.path.basename(self.file.path.split("::" )[0] ) __UpperCamelCase : str = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) __UpperCamelCase : List[str] = None @classmethod def _lowerCamelCase ( cls :Optional[Any] , a :Tuple ) -> Union[str, Any]: # compressed file paths are always relative to the archive root return super()._strip_protocol(_lowercase ).lstrip("/" ) def _lowerCamelCase ( self :int ) -> List[Any]: if self.dir_cache is None: __UpperCamelCase : int = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} __UpperCamelCase : Tuple = {f["name"]: f} def _lowerCamelCase ( self :List[Any] , a :str ) -> List[Any]: return self.file.open().read() def _lowerCamelCase ( self :Optional[int] , a :str , a :str = "rb" , a :Any=None , a :Optional[Any]=True , a :Dict=None , **a :Optional[int] , ) -> int: __UpperCamelCase : Optional[int] = self._strip_protocol(_lowercase ) if mode != "rb": raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' ) return self.file.open() class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' _A = "bz2" _A = "bz2" _A = ".bz2" class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' _A = "gzip" _A = "gzip" _A = ".gz" class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' _A = "lz4" _A = "lz4" _A = ".lz4" class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' _A = "xz" _A = "xz" _A = ".xz" class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' _A = "zstd" _A = "zstd" _A = ".zst" def __init__( self :List[Any] , a :str , a :str = "rb" , a :Optional[str] = None , a :Optional[dict] = None , a :int = DEFAULT_BLOCK_SIZE , **a :Dict , ) -> Optional[int]: super().__init__( fo=_lowercase , mode=_lowercase , target_protocol=_lowercase , target_options=_lowercase , block_size=_lowercase , **_lowercase , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 __UpperCamelCase : Optional[int] = self.file.__enter__ class lowerCamelCase__ : '''simple docstring''' def __init__( self :Union[str, Any] , a :Any ) -> Optional[int]: __UpperCamelCase : Dict = file_ def __enter__( self :str ) -> Optional[int]: self._file.__enter__() return self def __exit__( self :Union[str, Any] , *a :List[Any] , **a :Any ) -> Tuple: self._file.__exit__(*_lowercase , **_lowercase ) def __iter__( self :str ) -> Optional[int]: return iter(self._file ) def _lowerCamelCase ( self :Optional[int] ) -> Any: return next(self._file ) def __getattr__( self :Optional[int] , a :Tuple ) -> Tuple: return getattr(self._file , _lowercase ) def fixed_enter(*a :Any , **a :int ): return WrappedFile(_enter(*_lowercase , **_lowercase ) ) __UpperCamelCase : str = fixed_enter
232
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self : Tuple , _lowercase : str , _lowercase : str ): __UpperCAmelCase , __UpperCAmelCase = text, pattern __UpperCAmelCase , __UpperCAmelCase = len(_lowercase ), len(_lowercase ) def a ( self : Optional[int] , _lowercase : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def a ( self : int , _lowercase : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def a ( self : Optional[Any] ): # searches pattern in text and returns index positions __UpperCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): __UpperCAmelCase = self.mismatch_in_text(_lowercase ) if mismatch_index == -1: positions.append(_lowercase ) else: __UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) __UpperCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _lowercase : str = 'ABAABA' _lowercase : Tuple = 'AB' _lowercase : Dict = BoyerMooreSearch(text, pattern) _lowercase : Any = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
332
0
import cva import numpy as np class __lowercase : def __init__( self , A_ , A_ ) ->Any: '''simple docstring''' if k in (0.04, 0.06): __lowerCAmelCase : List[Any] = k __lowerCAmelCase : List[Any] = window_size else: raise ValueError('''invalid k value''' ) def __str__( self ) ->Optional[Any]: '''simple docstring''' return str(self.k ) def UpperCamelCase__ ( self , A_ ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = cva.imread(_lowercase , 0 ) __lowerCAmelCase, __lowerCAmelCase : Dict = img.shape __lowerCAmelCase : Tuple = [] __lowerCAmelCase : Any = img.copy() __lowerCAmelCase : Tuple = cva.cvtColor(_lowercase , cva.COLOR_GRAY2RGB ) __lowerCAmelCase, __lowerCAmelCase : Optional[int] = np.gradient(_lowercase ) __lowerCAmelCase : Tuple = dx**2 __lowerCAmelCase : Optional[Any] = dy**2 __lowerCAmelCase : Tuple = dx * dy __lowerCAmelCase : str = 0.04 __lowerCAmelCase : Any = self.window_size // 2 for y in range(_lowercase , h - offset ): for x in range(_lowercase , w - offset ): __lowerCAmelCase : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowerCAmelCase : Union[str, Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowerCAmelCase : List[Any] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowerCAmelCase : List[Any] = (wxx * wyy) - (wxy**2) __lowerCAmelCase : Tuple = wxx + wyy __lowerCAmelCase : Dict = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": _UpperCamelCase = HarrisCorner(0.04, 3) _UpperCamelCase = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
275
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _UpperCAmelCase : a__ : int a__ : Node | None = None a__ : Node | None = None def lowercase__ ( ): __UpperCAmelCase = Node(1 ) __UpperCAmelCase = Node(2 ) __UpperCAmelCase = Node(3 ) __UpperCAmelCase = Node(4 ) __UpperCAmelCase = Node(5 ) return tree def lowercase__ ( snake_case_ :Node | None ): return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowercase__ ( snake_case_ :Node | None ): return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowercase__ ( snake_case_ :Node | None ): __UpperCAmelCase = [] if root is None: return output __UpperCAmelCase = deque([root] ) while process_queue: __UpperCAmelCase = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None ): if root is None: return [] __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = height(snake_case_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 1 else: output.append(get_nodes_from_right_to_left(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 0 return output def lowercase__ ( ): # Main function for testing. __UpperCAmelCase = make_tree() print(F'''In-order Traversal: {inorder(snake_case_ )}''' ) print(F'''Pre-order Traversal: {preorder(snake_case_ )}''' ) print(F'''Post-order Traversal: {postorder(snake_case_ )}''' , '''\n''' ) print(F'''Height of Tree: {height(snake_case_ )}''' , '''\n''' ) print('''Complete Level Order Traversal: ''' ) print(level_order(snake_case_ ) , '''\n''' ) print('''Level-wise order Traversal: ''' ) for level in range(1 , height(snake_case_ ) + 1 ): print(F'''Level {level}:''' , get_nodes_from_left_to_right(snake_case_ , level=snake_case_ ) ) print('''\nZigZag order Traversal: ''' ) print(zigzag(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
332
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase ): snake_case_ = "maskformer-swin" snake_case_ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Union[str, Any] , __lowercase : List[str]=2_24 , __lowercase : List[Any]=4 , __lowercase : Union[str, Any]=3 , __lowercase : Any=96 , __lowercase : Dict=[2, 2, 6, 2] , __lowercase : Any=[3, 6, 12, 24] , __lowercase : Optional[int]=7 , __lowercase : Dict=4.0 , __lowercase : List[Any]=True , __lowercase : Optional[int]=0.0 , __lowercase : Any=0.0 , __lowercase : Dict=0.1 , __lowercase : List[Any]="gelu" , __lowercase : List[str]=False , __lowercase : Tuple=0.02 , __lowercase : Optional[int]=1e-5 , __lowercase : Tuple=None , __lowercase : Optional[Any]=None , **__lowercase : Union[str, Any] , ) -> int: super().__init__(**_lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =image_size SCREAMING_SNAKE_CASE__ : str =patch_size SCREAMING_SNAKE_CASE__ : Optional[Any] =num_channels SCREAMING_SNAKE_CASE__ : Union[str, Any] =embed_dim SCREAMING_SNAKE_CASE__ : Optional[int] =depths SCREAMING_SNAKE_CASE__ : List[str] =len(_lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =num_heads SCREAMING_SNAKE_CASE__ : Tuple =window_size SCREAMING_SNAKE_CASE__ : List[Any] =mlp_ratio SCREAMING_SNAKE_CASE__ : Optional[Any] =qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =drop_path_rate SCREAMING_SNAKE_CASE__ : List[Any] =hidden_act SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_absolute_embeddings SCREAMING_SNAKE_CASE__ : Optional[int] =layer_norm_eps SCREAMING_SNAKE_CASE__ : List[Any] =initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model SCREAMING_SNAKE_CASE__ : List[str] =int(embed_dim * 2 ** (len(_lowercase ) - 1) ) SCREAMING_SNAKE_CASE__ : Dict =['''stem'''] + [F"stage{idx}" for idx in range(1 , len(_lowercase ) + 1 )] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =get_aligned_output_features_output_indices( out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
152
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class _UpperCAmelCase ( unittest.TestCase ): @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) ) @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) )
332
0
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) UpperCAmelCase_ : List[Any] = logging.getLogger() def _A (__a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = {} SCREAMING_SNAKE_CASE_ : Any = os.path.join(snake_case_ , '''all_results.json''' ) if os.path.exists(snake_case_ ): with open(snake_case_ , '''r''' ) as f: SCREAMING_SNAKE_CASE_ : str = json.load(snake_case_ ) else: raise ValueError(f'can\'t find {path}' ) return results UpperCAmelCase_ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class lowerCAmelCase__ ( _lowerCAmelCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' import xla_spawn SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE_ : List[str] = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split() with patch.object(_lowercase , '''argv''' , _lowercase): SCREAMING_SNAKE_CASE_ : Tuple = time() xla_spawn.main() SCREAMING_SNAKE_CASE_ : Optional[int] = time() SCREAMING_SNAKE_CASE_ : int = get_results(_lowercase) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' import xla_spawn SCREAMING_SNAKE_CASE_ : Tuple = ''' ./tests/test_trainer_tpu.py --num_cores=8 ./tests/test_trainer_tpu.py '''.split() with patch.object(_lowercase , '''argv''' , _lowercase): xla_spawn.main()
91
"""simple docstring""" def lowercase__ ( snake_case_ :Union[str, Any] ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = max(snake_case_ ) __UpperCAmelCase = min(snake_case_ ) # create the counting array __UpperCAmelCase = coll_max + 1 - coll_min __UpperCAmelCase = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , snake_case_ ): __UpperCAmelCase = counting_arr[i] + counting_arr[i - 1] # create the output collection __UpperCAmelCase = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , snake_case_ ) ): __UpperCAmelCase = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowercase__ ( snake_case_ :str ): return "".join([chr(snake_case_ ) for i in counting_sort([ord(snake_case_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt" _lowercase : int = input('Enter numbers separated by a comma:\n').strip() _lowercase : int = [int(item) for item in user_input.split(',')] print(counting_sort(unsorted))
332
0
"""simple docstring""" import argparse import json import subprocess def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : int = [] __lowerCAmelCase : Dict = ( F"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"" ' https://api.github.com/repos/huggingface/transformers/actions/runners' ) __lowerCAmelCase : List[Any] = subprocess.run(snake_case_ , shell=snake_case_ , stdout=subprocess.PIPE ) __lowerCAmelCase : List[Any] = output.stdout.decode('utf-8' ) __lowerCAmelCase : List[Any] = json.loads(snake_case_ ) __lowerCAmelCase : Optional[Any] = status['runners'] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(snake_case_ ) # save the result so we can report them on Slack with open('offline_runners.txt' , 'w' ) as fp: fp.write(json.dumps(snake_case_ ) ) if len(snake_case_ ) > 0: __lowerCAmelCase : int = '\n'.join([x['name'] for x in offline_runners] ) raise ValueError(F"The following runners are offline:\n{failed}" ) if __name__ == "__main__": def __lowerCAmelCase (_UpperCamelCase ): return values.split(',' ) lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--target_runners""", default=None, type=list_str, required=True, help="""Comma-separated list of runners to check status.""", ) parser.add_argument( """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission.""" ) lowerCamelCase__ = parser.parse_args() get_runner_status(args.target_runners, args.token)
86
"""simple docstring""" from collections import defaultdict def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = first_str.lower().strip() __UpperCAmelCase = second_str.lower().strip() # Remove whitespace __UpperCAmelCase = first_str.replace(''' ''' , '''''' ) __UpperCAmelCase = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(snake_case_ ) != len(snake_case_ ): return False # Default values for count should be 0 __UpperCAmelCase = defaultdict(snake_case_ ) # For each character in input strings, # increment count in the corresponding for i in range(len(snake_case_ ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() _lowercase : List[Any] = input('Enter the first string ').strip() _lowercase : Tuple = input('Enter the second string ').strip() _lowercase : str = check_anagrams(input_a, input_b) print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
332
0
from __future__ import annotations class A : def __init__(self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : str ) -> List[Any]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = text, pattern UpperCAmelCase__ , UpperCAmelCase__ = len(_lowercase ), len(_lowercase ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : str ) -> Any: """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowercase_ (self : int , __UpperCAmelCase : int ) -> Optional[int]: """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowercase_ (self : Optional[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = [] for i in range(self.textLen - self.patLen + 1 ): UpperCAmelCase__ = self.mismatch_in_text(_lowercase ) if mismatch_index == -1: positions.append(_lowercase ) else: UpperCAmelCase__ = self.match_in_pattern(self.text[mismatch_index] ) UpperCAmelCase__ = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions UpperCamelCase__ = 'ABAABA' UpperCamelCase__ = 'AB' UpperCamelCase__ = BoyerMooreSearch(text, pattern) UpperCamelCase__ = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
65
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict , _lowercase : Union[str, Any] ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __UpperCAmelCase = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowercase ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : List[str] ): __UpperCAmelCase = '''sgugger/tiny-distilbert-classification''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) # set architectures equal to `None` __UpperCAmelCase = None __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Tuple ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Any ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Union[str, Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() ) def a ( self : List[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowercase : str ): self.assertTrue(hasattr(_lowercase , '''sequential''' ) ) self.assertTrue(hasattr(_lowercase , '''cumulative''' ) ) self.assertTrue(hasattr(_lowercase , '''current''' ) ) self.assertTrue(hasattr(_lowercase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
332
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase , unittest.TestCase ): _a = KandinskyVaaControlnetImgaImgPipeline _a = ["image_embeds", "negative_image_embeds", "image", "hint"] _a = ["image_embeds", "negative_image_embeds", "image", "hint"] _a = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] _a = False @property def __lowercase ( self : Dict ): return 32 @property def __lowercase ( self : str ): return 32 @property def __lowercase ( self : Union[str, Any] ): return self.time_input_dim @property def __lowercase ( self : Optional[Any] ): return self.time_input_dim * 4 @property def __lowercase ( self : List[Any] ): return 100 @property def __lowercase ( self : Optional[Any] ): torch.manual_seed(0 ) lowerCAmelCase = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } lowerCAmelCase = UNetaDConditionModel(**_lowercase ) return model @property def __lowercase ( self : List[str] ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowercase ( self : int ): torch.manual_seed(0 ) lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def __lowercase ( self : List[Any] ): lowerCAmelCase = self.dummy_unet lowerCAmelCase = self.dummy_movq lowerCAmelCase = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_0085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } lowerCAmelCase = DDIMScheduler(**_lowercase ) lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __lowercase ( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Any=0 ): lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase ) lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowercase ) # create init_image lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((256, 256) ) # create hint lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase ) if str(_lowercase ).startswith("""mps""" ): lowerCAmelCase = torch.manual_seed(_lowercase ) else: lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) lowerCAmelCase = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def __lowercase ( self : str ): lowerCAmelCase = """cpu""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_lowercase ) lowerCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) lowerCAmelCase = pipe(**self.get_dummy_inputs(_lowercase ) ) lowerCAmelCase = output.images lowerCAmelCase = pipe( **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array( [0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowercase ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : Tuple ): lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" ) lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) lowerCAmelCase = init_image.resize((512, 512) ) lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) lowerCAmelCase = torch.from_numpy(np.array(_lowercase ) ).float() / 255.0 lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) lowerCAmelCase = """A robot, 4k photo""" lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(_lowercase ) lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa ) lowerCAmelCase = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase , lowerCAmelCase = pipe_prior( _lowercase , image=_lowercase , strength=0.85 , generator=_lowercase , negative_prompt="""""" , ).to_tuple() lowerCAmelCase = pipeline( image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , hint=_lowercase , generator=_lowercase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , ) lowerCAmelCase = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowercase , _lowercase )
155
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _UpperCAmelCase ( _lowerCAmelCase ): def a ( self : Tuple , _lowercase : Dict=None , _lowercase : str=None , _lowercase : Union[str, Any]=None , **_lowercase : Tuple ): if tokenize_kwargs is None: __UpperCAmelCase = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __UpperCAmelCase = truncation __UpperCAmelCase = tokenize_kwargs __UpperCAmelCase = {} if return_tensors is not None: __UpperCAmelCase = return_tensors return preprocess_params, {}, postprocess_params def a ( self : int , _lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): __UpperCAmelCase = self.framework __UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase ) return model_inputs def a ( self : List[str] , _lowercase : Tuple ): __UpperCAmelCase = self.model(**_lowercase ) return model_outputs def a ( self : int , _lowercase : Tuple , _lowercase : str=False ): # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[Any] , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): return super().__call__(*_lowercase , **_lowercase )
332
0
"""simple docstring""" from __future__ import annotations from math import gcd def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : int = 2 , __snake_case : int = 1 , __snake_case : int = 3 , ): '''simple docstring''' if num < 2: raise ValueError('The input value cannot be less than 2' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(__snake_case : int , __snake_case : int , __snake_case : int ) -> int: return (pow(snake_case_ , 2 ) + step) % modulus for _ in range(snake_case_ ): # These track the position within the cycle detection logic. lowercase = seed lowercase = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowercase = rand_fn(snake_case_ , snake_case_ , snake_case_ ) lowercase = rand_fn(snake_case_ , snake_case_ , snake_case_ ) lowercase = rand_fn(snake_case_ , snake_case_ , snake_case_ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowercase = gcd(hare - tortoise , snake_case_ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowercase = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. return None if __name__ == "__main__": import argparse _UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( 'num', type=int, help='The value to find a divisor of', ) parser.add_argument( '--attempts', type=int, default=3, help='The number of attempts before giving up', ) _UpperCamelCase : Optional[int] = parser.parse_args() _UpperCamelCase : Optional[int] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F'''{args.num} is probably prime''') else: _UpperCamelCase : List[str] = args.num // divisor print(F'''{args.num} = {divisor} * {quotient}''')
220
"""simple docstring""" from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor _lowercase : Union[str, Any] = transforms.Compose( [ transforms.Resize((2_56, 2_56)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowercase__ ( snake_case_ :List[Any] ): if isinstance(snake_case_ , torch.Tensor ): return image elif isinstance(snake_case_ , PIL.Image.Image ): __UpperCAmelCase = [image] __UpperCAmelCase = [trans(img.convert('''RGB''' ) ) for img in image] __UpperCAmelCase = torch.stack(snake_case_ ) return image class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Any , _lowercase : str , _lowercase : str ): super().__init__() # make sure scheduler can always be converted to DDIM __UpperCAmelCase = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_lowercase , scheduler=_lowercase ) def a ( self : int , _lowercase : List[str] ): if strength < 0 or strength > 1: raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def a ( self : List[Any] , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : int ): # get the original timestep using init_timestep __UpperCAmelCase = min(int(num_inference_steps * strength ) , _lowercase ) __UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) __UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def a ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : Optional[int]=None ): if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}''' ) __UpperCAmelCase = image.to(device=_lowercase , dtype=_lowercase ) if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) __UpperCAmelCase = init_latents.shape __UpperCAmelCase = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase ) # get latents print('''add noise to latents at timestep''' , _lowercase ) __UpperCAmelCase = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = init_latents return latents @torch.no_grad() def __call__( self : Any , _lowercase : Union[torch.FloatTensor, PIL.Image.Image] = None , _lowercase : float = 0.8 , _lowercase : int = 1 , _lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase : float = 0.0 , _lowercase : int = 50 , _lowercase : Optional[bool] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , ): self.check_inputs(_lowercase ) # 2. Preprocess image __UpperCAmelCase = preprocess(_lowercase ) # 3. set timesteps self.scheduler.set_timesteps(_lowercase , device=self.device ) __UpperCAmelCase , __UpperCAmelCase = self.get_timesteps(_lowercase , _lowercase , self.device ) __UpperCAmelCase = timesteps[:1].repeat(_lowercase ) # 4. Prepare latent variables __UpperCAmelCase = self.prepare_latents(_lowercase , _lowercase , _lowercase , self.unet.dtype , self.device , _lowercase ) __UpperCAmelCase = latents # 5. Denoising loop for t in self.progress_bar(_lowercase ): # 1. predict noise model_output __UpperCAmelCase = self.unet(_lowercase , _lowercase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __UpperCAmelCase = self.scheduler.step( _lowercase , _lowercase , _lowercase , eta=_lowercase , use_clipped_model_output=_lowercase , generator=_lowercase , ).prev_sample __UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __UpperCAmelCase = self.numpy_to_pil(_lowercase ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=_lowercase )
332
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class __a ( _lowerCAmelCase ): __lowercase : Tuple = "data2vec-text" def __init__( self , lowerCAmelCase__=30_522 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=3_072 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> str: '''simple docstring''' super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) lowercase__: int = vocab_size lowercase__: str = hidden_size lowercase__: int = num_hidden_layers lowercase__: Dict = num_attention_heads lowercase__: Optional[Any] = hidden_act lowercase__: Dict = intermediate_size lowercase__: str = hidden_dropout_prob lowercase__: Tuple = attention_probs_dropout_prob lowercase__: Dict = max_position_embeddings lowercase__: str = type_vocab_size lowercase__: Tuple = initializer_range lowercase__: Any = layer_norm_eps lowercase__: Tuple = position_embedding_type lowercase__: Optional[Any] = use_cache lowercase__: Any = classifier_dropout class __a ( _lowerCAmelCase ): @property def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' if self.task == "multiple-choice": lowercase__: Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowercase__: str = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
196
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowercase : Union[str, Any] = { 'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ 'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel', 'ResNetBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ 'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = [ 'FlaxResNetForImageClassification', 'FlaxResNetModel', 'FlaxResNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys _lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
332
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __a :Any = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[int] = [ 'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMAEForPreTraining', 'ViTMAELayer', 'ViTMAEModel', 'ViTMAEPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Union[str, Any] = [ 'TFViTMAEForPreTraining', 'TFViTMAEModel', 'TFViTMAEPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys __a :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
"""simple docstring""" _lowercase : Any = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowercase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowercase : int = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
332
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) __a = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
"""simple docstring""" import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def lowercase__ ( snake_case_ :Optional[int] ): return EnvironmentCommand() def lowercase__ ( snake_case_ :List[str] ): return EnvironmentCommand(args.accelerate_config_file ) class _UpperCAmelCase ( _lowerCAmelCase ): @staticmethod def a ( _lowercase : ArgumentParser ): __UpperCAmelCase = parser.add_parser('''env''' ) download_parser.set_defaults(func=_lowercase ) download_parser.add_argument( '''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=_lowercase ) def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ): __UpperCAmelCase = accelerate_config_file def a ( self : Dict ): __UpperCAmelCase = '''not installed''' if is_safetensors_available(): import safetensors __UpperCAmelCase = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors __UpperCAmelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = __UpperCAmelCase = '''not found''' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __UpperCAmelCase = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(_lowercase ): __UpperCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict() __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(_lowercase , _lowercase ) else F'''\t{accelerate_config}''' ) __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_torch_available(): import torch __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_tf_available(): import tensorflow as tf __UpperCAmelCase = tf.__version__ try: # deprecated in v2.1 __UpperCAmelCase = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __UpperCAmelCase = bool(tf.config.list_physical_devices('''GPU''' ) ) __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_flax_available(): import flax import jax import jaxlib __UpperCAmelCase = flax.__version__ __UpperCAmelCase = jax.__version__ __UpperCAmelCase = jaxlib.__version__ __UpperCAmelCase = jax.lib.xla_bridge.get_backend().platform __UpperCAmelCase = { '''`transformers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Huggingface_hub version''': huggingface_hub.__version__, '''Safetensors version''': F'''{safetensors_version}''', '''Accelerate version''': F'''{accelerate_version}''', '''Accelerate config''': F'''{accelerate_config_str}''', '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''', '''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''', '''Jax version''': F'''{jax_version}''', '''JaxLib version''': F'''{jaxlib_version}''', '''Using GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(_lowercase ) ) return info @staticmethod def a ( _lowercase : str ): return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
332
0
import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowercase : Optional[Any] = logging.get_logger(__name__) class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' def __init__( self :Optional[Any] , *a :List[str] , **a :int ) -> int: warnings.warn( "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DPTImageProcessor instead." , _lowercase , ) super().__init__(*_lowercase , **_lowercase )
232
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list[float] , snake_case_ :list[float] ): __UpperCAmelCase = sorted(numsa + numsa ) __UpperCAmelCase , __UpperCAmelCase = divmod(len(snake_case_ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowercase : int = [float(x) for x in input('Enter the elements of first array: ').split()] _lowercase : Tuple = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
332
0
def _lowercase ( lowercase__ ): __lowerCAmelCase : int = len(snake_case_ ) while cur > 1: # Find the maximum number in arr __lowerCAmelCase : Optional[Any] = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi __lowerCAmelCase : List[Any] = arr[mi::-1] + arr[mi + 1 : len(snake_case_ )] # Reverse whole list __lowerCAmelCase : Optional[int] = arr[cur - 1 :: -1] + arr[cur : len(snake_case_ )] cur -= 1 return arr if __name__ == "__main__": _UpperCamelCase = input("Enter numbers separated by a comma:\n").strip() _UpperCamelCase = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
275
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class _UpperCAmelCase : def __init__( self : Union[str, Any] , _lowercase : Optional[Any] ): __UpperCAmelCase = str(id_ ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = [] __UpperCAmelCase = {} # {vertex:distance} def __lt__( self : str , _lowercase : List[Any] ): return self.key < other.key def __repr__( self : int ): return self.id def a ( self : Union[str, Any] , _lowercase : int ): self.neighbors.append(_lowercase ) def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : int ): __UpperCAmelCase = weight def lowercase__ ( snake_case_ :int , snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , snake_case_ ) graph[b - 1].add_edge(graph[a - 1] , snake_case_ ) def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ): __UpperCAmelCase = [] for u in graph: __UpperCAmelCase = math.inf __UpperCAmelCase = None __UpperCAmelCase = 0 __UpperCAmelCase = graph[:] while q: __UpperCAmelCase = min(snake_case_ ) q.remove(snake_case_ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): __UpperCAmelCase = u __UpperCAmelCase = u.edges[v.id] for i in range(1 , len(snake_case_ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ): for u in graph: __UpperCAmelCase = math.inf __UpperCAmelCase = None __UpperCAmelCase = 0 __UpperCAmelCase = list(snake_case_ ) hq.heapify(snake_case_ ) while h: __UpperCAmelCase = hq.heappop(snake_case_ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): __UpperCAmelCase = u __UpperCAmelCase = u.edges[v.id] hq.heapify(snake_case_ ) for i in range(1 , len(snake_case_ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def lowercase__ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
332
0
'''simple docstring''' from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor a_ = transforms.Compose( [ transforms.Resize((2_5_6, 2_5_6)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def _a( UpperCamelCase__ : List[Any] ): '''simple docstring''' if isinstance(snake_case_, torch.Tensor ): return image elif isinstance(snake_case_, PIL.Image.Image ): SCREAMING_SNAKE_CASE__ : Optional[Any] =[image] SCREAMING_SNAKE_CASE__ : Optional[int] =[trans(img.convert('''RGB''' ) ) for img in image] SCREAMING_SNAKE_CASE__ : Tuple =torch.stack(snake_case_ ) return image class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ): def __init__( self : Any , __lowercase : str , __lowercase : str ) -> Tuple: super().__init__() # make sure scheduler can always be converted to DDIM SCREAMING_SNAKE_CASE__ : Tuple =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_lowercase , scheduler=_lowercase ) def __magic_name__ ( self : int , __lowercase : List[str] ) -> Tuple: if strength < 0 or strength > 1: raise ValueError(F"The value of strength should in [0.0, 1.0] but is {strength}" ) def __magic_name__ ( self : List[Any] , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : int ) -> Optional[Any]: # get the original timestep using init_timestep SCREAMING_SNAKE_CASE__ : Tuple =min(int(num_inference_steps * strength ) , _lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =max(num_inference_steps - init_timestep , 0 ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __magic_name__ ( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Optional[int]=None ) -> str: if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" ) SCREAMING_SNAKE_CASE__ : int =image.to(device=_lowercase , dtype=_lowercase ) if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size: raise ValueError( F"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch" F" size of {batch_size}. Make sure the batch size matches the length of the generators." ) SCREAMING_SNAKE_CASE__ : List[Any] =init_latents.shape SCREAMING_SNAKE_CASE__ : int =randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase ) # get latents print('''add noise to latents at timestep''' , _lowercase ) SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(_lowercase , _lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =init_latents return latents @torch.no_grad() def __call__( self : Any , __lowercase : Union[torch.FloatTensor, PIL.Image.Image] = None , __lowercase : float = 0.8 , __lowercase : int = 1 , __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowercase : float = 0.0 , __lowercase : int = 50 , __lowercase : Optional[bool] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , ) -> Tuple: self.check_inputs(_lowercase ) # 2. Preprocess image SCREAMING_SNAKE_CASE__ : Dict =preprocess(_lowercase ) # 3. set timesteps self.scheduler.set_timesteps(_lowercase , device=self.device ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =self.get_timesteps(_lowercase , _lowercase , self.device ) SCREAMING_SNAKE_CASE__ : List[str] =timesteps[:1].repeat(_lowercase ) # 4. Prepare latent variables SCREAMING_SNAKE_CASE__ : Optional[Any] =self.prepare_latents(_lowercase , _lowercase , _lowercase , self.unet.dtype , self.device , _lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =latents # 5. Denoising loop for t in self.progress_bar(_lowercase ): # 1. predict noise model_output SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(_lowercase , _lowercase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 SCREAMING_SNAKE_CASE__ : List[Any] =self.scheduler.step( _lowercase , _lowercase , _lowercase , eta=_lowercase , use_clipped_model_output=_lowercase , generator=_lowercase , ).prev_sample SCREAMING_SNAKE_CASE__ : Any =(image / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ : Tuple =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE__ : Optional[int] =self.numpy_to_pil(_lowercase ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=_lowercase )
152
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : str = logging.get_logger(__name__) _lowercase : Dict = { 'microsoft/swinv2-tiny-patch4-window8-256': ( 'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "swinv2" a__ : List[Any] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Any , _lowercase : List[Any]=2_24 , _lowercase : int=4 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=96 , _lowercase : Optional[int]=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : str=7 , _lowercase : Union[str, Any]=4.0 , _lowercase : List[str]=True , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=False , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-5 , _lowercase : Tuple=32 , **_lowercase : Optional[int] , ): super().__init__(**_lowercase ) __UpperCAmelCase = image_size __UpperCAmelCase = patch_size __UpperCAmelCase = num_channels __UpperCAmelCase = embed_dim __UpperCAmelCase = depths __UpperCAmelCase = len(_lowercase ) __UpperCAmelCase = num_heads __UpperCAmelCase = window_size __UpperCAmelCase = mlp_ratio __UpperCAmelCase = qkv_bias __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = drop_path_rate __UpperCAmelCase = hidden_act __UpperCAmelCase = use_absolute_embeddings __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = initializer_range __UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) ) __UpperCAmelCase = (0, 0, 0, 0)
332
0
"""simple docstring""" import math def _A (__a , __a ) -> int: """simple docstring""" if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(snake_case_ ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('''This should never happen''' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. UpperCAmelCase_ : str = 'Enter the base and the power separated by a comma: ' UpperCAmelCase_ : Tuple = map(int, input(prompt).split(""",""")) UpperCAmelCase_ : int = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. UpperCAmelCase_ : str = res(xa, ya) UpperCAmelCase_ : Optional[Any] = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
91
"""simple docstring""" import pprint import requests _lowercase : Optional[Any] = 'https://zenquotes.io/api' def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": _lowercase : int = random_quotes() pprint.pprint(response)
332
0
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCamelCase__ = Lock() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(snake_case_ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() __lowerCAmelCase : Tuple = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left __lowerCAmelCase : Union[str, Any] = min(snake_case_ , snake_case_ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(snake_case_ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() __lowerCAmelCase : Tuple = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right __lowerCAmelCase : Optional[Any] = max(snake_case_ , snake_case_ ) # after all swaps are performed, send the values back to main result_pipe[1].send(snake_case_ ) def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Any = [] __lowerCAmelCase : List[Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop __lowerCAmelCase : Union[str, Any] = Pipe() __lowerCAmelCase : Union[str, Any] = Pipe() process_array_.append( Process( target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) __lowerCAmelCase : List[str] = temp_rs __lowerCAmelCase : Any = temp_rr for i in range(1 , len(snake_case_ ) - 1 ): __lowerCAmelCase : Optional[Any] = Pipe() __lowerCAmelCase : Any = Pipe() process_array_.append( Process( target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) __lowerCAmelCase : Optional[int] = temp_rs __lowerCAmelCase : Tuple = temp_rr process_array_.append( Process( target=snake_case_ , args=( len(snake_case_ ) - 1, arr[len(snake_case_ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(snake_case_ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(snake_case_ ) ): __lowerCAmelCase : Dict = result_pipe[p][0].recv() process_array_[p].join() return arr def __lowerCAmelCase (): __lowerCAmelCase : Optional[int] = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*snake_case_ ) __lowerCAmelCase : Union[str, Any] = odd_even_transposition(snake_case_ ) print('Sorted List\n' ) print(*snake_case_ ) if __name__ == "__main__": main()
86
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[str] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :Union[tf.Tensor, np.ndarray] ): if isinstance(snake_case_ , np.ndarray ): return list(tensor.shape ) __UpperCAmelCase = tf.shape(snake_case_ ) if tensor.shape == tf.TensorShape(snake_case_ ): return dynamic __UpperCAmelCase = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(snake_case_ )] def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :Optional[int] = None , snake_case_ :Optional[str] = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=snake_case_ , name=snake_case_ ) def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :Union[str, Any]=1E-5 , snake_case_ :List[str]=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case_ , snake_case_ ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized __UpperCAmelCase , __UpperCAmelCase = tf.nn.moments(snake_case_ , axes=[axis] , keepdims=snake_case_ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __UpperCAmelCase = [1] * inputs.shape.rank __UpperCAmelCase = shape_list(snake_case_ )[axis] __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) # Compute layer normalization using the batch_normalization # function. __UpperCAmelCase = tf.nn.batch_normalization( snake_case_ , snake_case_ , snake_case_ , offset=snake_case_ , scale=snake_case_ , variance_epsilon=snake_case_ , ) return outputs def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :List[str]=0 , snake_case_ :Optional[Any]=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __UpperCAmelCase = tf.shape(snake_case_ ) __UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :tf.Tensor ): if not isinstance(snake_case_ , tf.Tensor ): __UpperCAmelCase = tf.convert_to_tensor(snake_case_ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __UpperCAmelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __UpperCAmelCase = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __UpperCAmelCase = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :int , snake_case_ :str = "input_ids" ): tf.debugging.assert_less( snake_case_ , tf.cast(snake_case_ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case_ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any] , snake_case_ :List[str] ): __UpperCAmelCase = 64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __UpperCAmelCase = [x for x in data if len(snake_case_ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) __UpperCAmelCase = np.asarray(snake_case_ ) __UpperCAmelCase = 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(snake_case_ ): __UpperCAmelCase = chunk_data else: __UpperCAmelCase = data def lowercase__ ( snake_case_ :str , snake_case_ :List[str] ): if name in group.attrs: __UpperCAmelCase = [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs[name]] else: __UpperCAmelCase = [] __UpperCAmelCase = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase__ ( snake_case_ :Tuple ): def _expand_single_ad_tensor(snake_case_ :Optional[int] ): if isinstance(snake_case_ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(snake_case_ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , snake_case_ )
332
0
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class A ( unittest.TestCase ): __UpperCAmelCase : List[Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = hf_hub_download( repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) UpperCAmelCase__ = VideoClassificationPipeline(model=_lowercase , image_processor=_lowercase , top_k=2 ) UpperCAmelCase__ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def lowercase_ (self : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" for example in examples: UpperCAmelCase__ = video_classifier(_lowercase ) self.assertEqual( _lowercase , [ {"score": ANY(_lowercase ), "label": ANY(_lowercase )}, {"score": ANY(_lowercase ), "label": ANY(_lowercase )}, ] , ) @require_torch def lowercase_ (self : Any ) -> int: """simple docstring""" UpperCAmelCase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" UpperCAmelCase__ = VideoMAEFeatureExtractor( size={"shortest_edge": 1_0} , crop_size={"height": 1_0, "width": 1_0} ) UpperCAmelCase__ = pipeline( "video-classification" , model=_lowercase , feature_extractor=_lowercase , frame_sampling_rate=4 ) UpperCAmelCase__ = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) UpperCAmelCase__ = video_classifier(_lowercase , top_k=2 ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , ) UpperCAmelCase__ = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ] , ) @require_tf def lowercase_ (self : int ) -> Optional[Any]: """simple docstring""" pass
65
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowercase__ ( snake_case_ :Union[str, Any]=None ): if subparsers is not None: __UpperCAmelCase = subparsers.add_parser('''env''' ) else: __UpperCAmelCase = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=snake_case_ , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=snake_case_ ) return parser def lowercase__ ( snake_case_ :List[Any] ): __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = is_xpu_available() __UpperCAmelCase = is_npu_available() __UpperCAmelCase = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(snake_case_ ): __UpperCAmelCase = load_config_from_file(args.config_file ).to_dict() __UpperCAmelCase = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''PyTorch XPU available''': str(snake_case_ ), '''PyTorch NPU available''': str(snake_case_ ), '''System RAM''': F'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''', } if pt_cuda_available: __UpperCAmelCase = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(snake_case_ , snake_case_ ) else F'''\t{accelerate_config}''' ) print(snake_case_ ) __UpperCAmelCase = accelerate_config return info def lowercase__ ( ): __UpperCAmelCase = env_command_parser() __UpperCAmelCase = parser.parse_args() env_command(snake_case_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
332
0
"""simple docstring""" from math import factorial a = {str(d): factorial(d) for d in range(1_0)} def lowercase (snake_case__ : int ) -> str: '''simple docstring''' return sum(DIGIT_FACTORIAL[d] for d in str(snake_case_ ) ) def lowercase () -> List[Any]: '''simple docstring''' lowerCAmelCase = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , snake_case_ ) if sum_of_digit_factorial(snake_case_ ) == i ) if __name__ == "__main__": print(f"""{solution() = }""")
155
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 25_00_04 _lowercase : int = 25_00_20 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : Union[str, Any] = MBartaaTokenizer a__ : List[str] = MBartaaTokenizerFast a__ : Any = True a__ : List[str] = True def a ( self : str ): super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self : Dict ): __UpperCAmelCase = '''<s>''' __UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowercase ) , 10_54 ) def a ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def a ( self : str ): __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) __UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) __UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def a ( self : str ): # fmt: off __UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def a ( self : str ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=True __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=False __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): a__ : str = "facebook/mbart-large-50-one-to-many-mmt" a__ : Union[str, Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] a__ : Any = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] a__ : Any = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def a ( cls : Tuple ): __UpperCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) __UpperCAmelCase = 1 return cls def a ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) def a ( self : Optional[Any] ): self.assertIn(_lowercase , self.tokenizer.all_special_ids ) __UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) __UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase ) self.assertEqual(_lowercase , _lowercase ) self.assertNotIn(self.tokenizer.eos_token , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , _lowercase ) __UpperCAmelCase = 10 __UpperCAmelCase = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0] self.assertEqual(ids[0] , _lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_lowercase ) , _lowercase ) def a ( self : Optional[int] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowercase ) __UpperCAmelCase = MBartaaTokenizer.from_pretrained(_lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors='''pt''' ) __UpperCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors='''pt''' ) __UpperCAmelCase = targets['''input_ids'''] __UpperCAmelCase = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(_lowercase ) , { # en_XX, A, test, EOS '''input_ids''': [[25_00_04, 62, 30_34, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
332
0
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ): '''simple docstring''' lowercase = 3_84 if "tiny" in model_name: lowercase = [3, 3, 9, 3] lowercase = [96, 1_92, 3_84, 7_68] if "small" in model_name: lowercase = [3, 3, 27, 3] lowercase = [96, 1_92, 3_84, 7_68] if "base" in model_name: lowercase = [3, 3, 27, 3] lowercase = [1_28, 2_56, 5_12, 10_24] lowercase = 5_12 if "large" in model_name: lowercase = [3, 3, 27, 3] lowercase = [1_92, 3_84, 7_68, 15_36] lowercase = 7_68 if "xlarge" in model_name: lowercase = [3, 3, 27, 3] lowercase = [2_56, 5_12, 10_24, 20_48] lowercase = 10_24 # set label information lowercase = 1_50 lowercase = 'huggingface/label-files' lowercase = 'ade20k-id2label.json' lowercase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='dataset' ) , 'r' ) ) lowercase = {int(snake_case_ ): v for k, v in idalabel.items()} lowercase = {v: k for k, v in idalabel.items()} lowercase = ConvNextConfig( depths=snake_case_ , hidden_sizes=snake_case_ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) lowercase = UperNetConfig( backbone_config=snake_case_ , auxiliary_in_channels=snake_case_ , num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def _SCREAMING_SNAKE_CASE ( __snake_case : Any ): '''simple docstring''' lowercase = [] # fmt: off # stem rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') ) rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') ) rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') ) rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') ) rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') ) rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') ) rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') ) rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') ) rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') ) rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') ) rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') ) rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') ) if i > 0: rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') ) rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') ) rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') ) rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Any ): '''simple docstring''' lowercase = dct.pop(snake_case_ ) lowercase = val def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : List[str] , __snake_case : Optional[Any] ): '''simple docstring''' lowercase = { 'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth', 'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth', 'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth', 'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth', 'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth', } lowercase = model_name_to_url[model_name] lowercase = torch.hub.load_state_dict_from_url(snake_case_ , map_location='cpu' )['state_dict'] lowercase = get_upernet_config(snake_case_ ) lowercase = UperNetForSemanticSegmentation(snake_case_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowercase = state_dict.pop(snake_case_ ) if "bn" in key: lowercase = key.replace('bn' , 'batch_norm' ) lowercase = val # rename keys lowercase = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) model.load_state_dict(snake_case_ ) # verify on image lowercase = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' lowercase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert('RGB' ) lowercase = SegformerImageProcessor() lowercase = processor(snake_case_ , return_tensors='pt' ).pixel_values with torch.no_grad(): lowercase = model(snake_case_ ) if model_name == "upernet-convnext-tiny": lowercase = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": lowercase = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": lowercase = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": lowercase = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": lowercase = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case_ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case_ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(f'openmmlab/{model_name}' ) processor.push_to_hub(f'openmmlab/{model_name}' ) if __name__ == "__main__": _UpperCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-convnext-tiny', type=str, choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']], help='Name of the ConvNext UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _UpperCamelCase : List[Any] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
220
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase__ ( ): raise RuntimeError('''CUDA out of memory.''' ) class _UpperCAmelCase ( nn.Module ): def __init__( self : Optional[Any] ): super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def a ( self : Optional[int] , _lowercase : Optional[Any] ): return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) ) class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[int] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) def a ( self : Optional[int] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : str , _lowercase : List[str] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def a ( self : Tuple ): @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(_lowercase : Optional[int] ): pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : List[Any] ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : List[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : Union[str, Any] ): @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : str ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_lowercase ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def a ( self : Dict ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : int ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def a ( self : str ): __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _lowercase ) __UpperCAmelCase = release_memory(_lowercase ) self.assertEqual(torch.cuda.memory_allocated() , _lowercase )
332
0
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __lowerCAmelCase = logging.get_logger(__name__) class __a ( _lowerCAmelCase ): __lowercase : Any = ["input_features", "attention_mask"] def __init__( self , lowerCAmelCase__=80 , lowerCAmelCase__=16_000 , lowerCAmelCase__=80 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Dict: '''simple docstring''' super().__init__(feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , **_lowercase ) lowercase__: str = num_mel_bins lowercase__: List[str] = do_ceptral_normalize lowercase__: List[Any] = normalize_means lowercase__: int = normalize_vars lowercase__: Any = True def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , ) -> Union[str, Any]: '''simple docstring''' lowercase__: int = waveform * (2**15) # Kaldi compliance: 16-bit signed integers lowercase__: Union[str, Any] = torch.from_numpy(_lowercase ).unsqueeze(0 ) lowercase__: Optional[int] = ta_kaldi.fbank(_lowercase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = 0.0 , ) -> int: '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: lowercase__: Dict = x[:input_length].mean(axis=0 ) lowercase__: Dict = np.subtract(_lowercase , _lowercase ) if normalize_vars: lowercase__: int = x[:input_length].std(axis=0 ) lowercase__: str = np.divide(_lowercase , _lowercase ) if input_length < x.shape[0]: lowercase__: Any = padding_value # make sure array is in float32 lowercase__: List[Any] = x.astype(np.floataa ) return x def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple: '''simple docstring''' lowercase__: int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowercase , _lowercase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(_lowercase , _lowercase ) ] def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> str: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) lowercase__: Optional[int] = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) lowercase__: Optional[int] = is_batched_numpy or ( isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase__: List[str] = [np.asarray(_lowercase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowercase , np.ndarray ): lowercase__: List[Any] = np.asarray(_lowercase , dtype=np.floataa ) elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__: Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__: Optional[int] = [raw_speech] # extract fbank features lowercase__: List[str] = [self._extract_fbank_features(_lowercase ) for waveform in raw_speech] # convert into correct format for padding lowercase__: Optional[Any] = BatchFeature({'input_features': features} ) lowercase__: int = self.pad( _lowercase , padding=_lowercase , max_length=_lowercase , truncation=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , **_lowercase , ) # make sure list is in array format lowercase__: str = padded_inputs.get('input_features' ) if isinstance(input_features[0] , _lowercase ): lowercase__: str = [np.asarray(_lowercase , dtype=np.floataa ) for feature in input_features] lowercase__: Dict = padded_inputs.get('attention_mask' ) if attention_mask is not None: lowercase__: List[str] = [np.asarray(_lowercase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: lowercase__: List[Any] = ( np.array(_lowercase , dtype=np.intaa ) if self._get_padding_strategies(_lowercase , max_length=_lowercase ) is not PaddingStrategy.DO_NOT_PAD else None ) lowercase__: Any = self.normalize( padded_inputs['input_features'] , attention_mask=_lowercase ) if return_tensors is not None: lowercase__: int = padded_inputs.convert_to_tensors(_lowercase ) return padded_inputs
196
"""simple docstring""" import argparse import copy def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = {} with open(snake_case_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[1], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[0], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ): with open(snake_case_ ) as f: __UpperCAmelCase = f.read(1 ) __UpperCAmelCase = start_node __UpperCAmelCase = [] __UpperCAmelCase = start_node __UpperCAmelCase = 0 while visiting not in first_solution: __UpperCAmelCase = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution: __UpperCAmelCase = k[1] __UpperCAmelCase = k[0] first_solution.append(snake_case_ ) __UpperCAmelCase = distance_of_first_solution + int(snake_case_ ) __UpperCAmelCase = best_node first_solution.append(snake_case_ ) __UpperCAmelCase = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __UpperCAmelCase = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ): __UpperCAmelCase = [] for n in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) for kn in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) if n == kn: continue __UpperCAmelCase = copy.deepcopy(snake_case_ ) __UpperCAmelCase = kn __UpperCAmelCase = n __UpperCAmelCase = 0 for k in _tmp[:-1]: __UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __UpperCAmelCase = distance + int(i[1] ) _tmp.append(snake_case_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ): __UpperCAmelCase = 1 __UpperCAmelCase = first_solution __UpperCAmelCase = [] __UpperCAmelCase = distance_of_first_solution __UpperCAmelCase = solution while count <= iters: __UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = neighborhood[index_of_best_solution] __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = False while not found: __UpperCAmelCase = 0 while i < len(snake_case_ ): if best_solution[i] != solution[i]: __UpperCAmelCase = best_solution[i] __UpperCAmelCase = solution[i] break __UpperCAmelCase = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __UpperCAmelCase = True __UpperCAmelCase = best_solution[:-1] __UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __UpperCAmelCase = cost __UpperCAmelCase = solution else: __UpperCAmelCase = index_of_best_solution + 1 __UpperCAmelCase = neighborhood[index_of_best_solution] if len(snake_case_ ) >= size: tabu_list.pop(0 ) __UpperCAmelCase = count + 1 return best_solution_ever, best_cost def lowercase__ ( snake_case_ :str=None ): __UpperCAmelCase = generate_neighbours(args.File ) __UpperCAmelCase , __UpperCAmelCase = generate_first_solution( args.File , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = tabu_search( snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
332
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _a ( unittest.TestCase ): """simple docstring""" def __init__( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str=7 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Dict=10 , UpperCAmelCase : str=18 , UpperCAmelCase : Union[str, Any]=30 , UpperCAmelCase : Optional[int]=400 , UpperCAmelCase : Tuple=True , UpperCAmelCase : Dict=None , UpperCAmelCase : int=True , UpperCAmelCase : Any=[0.5, 0.5, 0.5] , UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , UpperCAmelCase : int=None , ): A_ = size if size is not None else {"shortest_edge": 18} A_ = crop_size if crop_size is not None else {"height": 18, "width": 18} A_ = parent A_ = batch_size A_ = num_channels A_ = num_frames A_ = image_size A_ = min_resolution A_ = max_resolution A_ = do_resize A_ = size A_ = do_normalize A_ = image_mean A_ = image_std A_ = crop_size def __A ( self : int ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _a ( _lowerCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = VivitImageProcessor if is_vision_available() else None def __A ( self : List[Any] ): A_ = VivitImageProcessingTester(self ) @property def __A ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : str ): A_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowercase , "image_mean" ) ) self.assertTrue(hasattr(_lowercase , "image_std" ) ) self.assertTrue(hasattr(_lowercase , "do_normalize" ) ) self.assertTrue(hasattr(_lowercase , "do_resize" ) ) self.assertTrue(hasattr(_lowercase , "do_center_crop" ) ) self.assertTrue(hasattr(_lowercase , "size" ) ) def __A ( self : int ): A_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __A ( self : Tuple ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos A_ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase ) for video in video_inputs: self.assertIsInstance(_lowercase , _lowercase ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input A_ = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __A ( self : List[str] ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase ) for video in video_inputs: self.assertIsInstance(_lowercase , _lowercase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input A_ = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __A ( self : Optional[Any] ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase ) for video in video_inputs: self.assertIsInstance(_lowercase , _lowercase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input A_ = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
312
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowercase__ ( snake_case_ :ndarray ): return np.dot(snake_case_ , snake_case_ ) class _UpperCAmelCase : def __init__( self : Union[str, Any] , *, _lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ): __UpperCAmelCase = regularization __UpperCAmelCase = gamma if kernel == "linear": __UpperCAmelCase = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('''gamma must be float or int''' ) if not self.gamma > 0: raise ValueError('''gamma must be > 0''' ) __UpperCAmelCase = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: __UpperCAmelCase = F'''Unknown kernel: {kernel}''' raise ValueError(_lowercase ) def a ( self : Dict , _lowercase : ndarray , _lowercase : ndarray ): return np.dot(_lowercase , _lowercase ) def a ( self : Any , _lowercase : ndarray , _lowercase : ndarray ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def a ( self : Union[str, Any] , _lowercase : list[ndarray] , _lowercase : ndarray ): __UpperCAmelCase = observations __UpperCAmelCase = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((__UpperCAmelCase) , ) = np.shape(_lowercase ) def to_minimize(_lowercase : ndarray ) -> float: __UpperCAmelCase = 0 ((__UpperCAmelCase) , ) = np.shape(_lowercase ) for i in range(_lowercase ): for j in range(_lowercase ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(_lowercase ) __UpperCAmelCase = LinearConstraint(_lowercase , 0 , 0 ) __UpperCAmelCase = Bounds(0 , self.regularization ) __UpperCAmelCase = minimize( _lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x __UpperCAmelCase = l_star # calculating mean offset of separation plane to points __UpperCAmelCase = 0 for i in range(_lowercase ): for j in range(_lowercase ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) __UpperCAmelCase = s / n def a ( self : List[Any] , _lowercase : ndarray ): __UpperCAmelCase = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , _lowercase ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
332
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCAmelCase_ ( _lowerCAmelCase ): """simple docstring""" lowercase = "sew" def __init__( self : Union[str, Any] , snake_case_ : Any=32 , snake_case_ : Dict=768 , snake_case_ : Dict=12 , snake_case_ : Dict=12 , snake_case_ : List[Any]=3_072 , snake_case_ : Tuple=2 , snake_case_ : List[str]="gelu" , snake_case_ : Optional[Any]=0.1 , snake_case_ : List[Any]=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : List[str]=0.0 , snake_case_ : int=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : List[str]=0.02 , snake_case_ : Optional[Any]=1E-5 , snake_case_ : Tuple="group" , snake_case_ : Optional[Any]="gelu" , snake_case_ : Dict=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case_ : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case_ : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case_ : str=False , snake_case_ : Optional[int]=128 , snake_case_ : List[Any]=16 , snake_case_ : str=True , snake_case_ : str=0.05 , snake_case_ : int=10 , snake_case_ : List[Any]=2 , snake_case_ : str=0.0 , snake_case_ : str=10 , snake_case_ : int=0 , snake_case_ : int="mean" , snake_case_ : str=False , snake_case_ : Optional[Any]=False , snake_case_ : Optional[Any]=256 , snake_case_ : int=0 , snake_case_ : Tuple=1 , snake_case_ : Dict=2 , **snake_case_ : Any , ): super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase ) snake_case__ : Any = hidden_size snake_case__ : List[Any] = feat_extract_norm snake_case__ : Optional[Any] = feat_extract_activation snake_case__ : Dict = list(_lowercase ) snake_case__ : List[str] = list(_lowercase ) snake_case__ : Any = list(_lowercase ) snake_case__ : Dict = conv_bias snake_case__ : str = num_conv_pos_embeddings snake_case__ : Optional[Any] = num_conv_pos_embedding_groups snake_case__ : Optional[int] = len(self.conv_dim ) snake_case__ : str = num_hidden_layers snake_case__ : Any = intermediate_size snake_case__ : Union[str, Any] = squeeze_factor snake_case__ : Optional[Any] = hidden_act snake_case__ : Optional[Any] = num_attention_heads snake_case__ : List[str] = hidden_dropout snake_case__ : Optional[Any] = attention_dropout snake_case__ : List[Any] = activation_dropout snake_case__ : Optional[int] = feat_proj_dropout snake_case__ : Dict = final_dropout snake_case__ : Optional[Any] = layerdrop snake_case__ : Union[str, Any] = layer_norm_eps snake_case__ : Tuple = initializer_range snake_case__ : Optional[Any] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case__ : Optional[int] = apply_spec_augment snake_case__ : Dict = mask_time_prob snake_case__ : Dict = mask_time_length snake_case__ : Union[str, Any] = mask_time_min_masks snake_case__ : Union[str, Any] = mask_feature_prob snake_case__ : Any = mask_feature_length snake_case__ : int = mask_feature_min_masks # ctc loss snake_case__ : Optional[int] = ctc_loss_reduction snake_case__ : Optional[int] = ctc_zero_infinity # sequence classification snake_case__ : List[Any] = use_weighted_layer_sum snake_case__ : List[Any] = classifier_proj_size @property def lowerCamelCase ( self : int ): return functools.reduce(operator.mul , self.conv_stride , 1 )
35
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowercase : int = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
332
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowercase : int = False class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self :List[str] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self :Union[str, Any] ) -> List[Any]: return 1_2 @property def _lowerCamelCase ( self :str ) -> Any: return 1_2 @property def _lowerCamelCase ( self :Optional[Any] ) -> Optional[int]: return 3_2 @property def _lowerCamelCase ( self :List[str] ) -> Tuple: torch.manual_seed(0 ) __UpperCamelCase : Optional[Any] = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def _lowerCamelCase ( self :int ) -> Any: __UpperCamelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def _lowerCamelCase ( self :Tuple ) -> List[Any]: torch.manual_seed(0 ) __UpperCamelCase : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(_lowercase ) @property def _lowerCamelCase ( self :List[Any] ) -> List[Any]: torch.manual_seed(0 ) __UpperCamelCase : Any = 1_2 __UpperCamelCase : List[str] = 1_2 __UpperCamelCase : Union[str, Any] = { "attention_bias": True, "cross_attention_dim": 3_2, "attention_head_dim": height * width, "num_attention_heads": 1, "num_vector_embeds": self.num_embed, "num_embeds_ada_norm": self.num_embeds_ada_norm, "norm_num_groups": 3_2, "sample_size": width, "activation_fn": "geglu-approximate", } __UpperCamelCase : Union[str, Any] = TransformeraDModel(**_lowercase ) return model def _lowerCamelCase ( self :Tuple ) -> Optional[Any]: __UpperCamelCase : str = "cpu" __UpperCamelCase : List[str] = self.dummy_vqvae __UpperCamelCase : List[str] = self.dummy_text_encoder __UpperCamelCase : int = self.dummy_tokenizer __UpperCamelCase : List[Any] = self.dummy_transformer __UpperCamelCase : Union[str, Any] = VQDiffusionScheduler(self.num_embed ) __UpperCamelCase : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase ) __UpperCamelCase : int = VQDiffusionPipeline( vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) __UpperCamelCase : int = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCamelCase : Optional[int] = "teddy bear playing in the pool" __UpperCamelCase : Optional[int] = torch.Generator(device=_lowercase ).manual_seed(0 ) __UpperCamelCase : int = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" ) __UpperCamelCase : List[str] = output.images __UpperCamelCase : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 ) __UpperCamelCase : Tuple = pipe( [prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0] __UpperCamelCase : int = image[0, -3:, -3:, -1] __UpperCamelCase : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) __UpperCamelCase : Tuple = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self :Any ) -> str: __UpperCamelCase : Tuple = "cpu" __UpperCamelCase : Any = self.dummy_vqvae __UpperCamelCase : Dict = self.dummy_text_encoder __UpperCamelCase : Union[str, Any] = self.dummy_tokenizer __UpperCamelCase : List[str] = self.dummy_transformer __UpperCamelCase : Tuple = VQDiffusionScheduler(self.num_embed ) __UpperCamelCase : Dict = LearnedClassifierFreeSamplingEmbeddings( learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __UpperCamelCase : Optional[Any] = VQDiffusionPipeline( vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) __UpperCamelCase : Any = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCamelCase : str = "teddy bear playing in the pool" __UpperCamelCase : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 ) __UpperCamelCase : List[str] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" ) __UpperCamelCase : Any = output.images __UpperCamelCase : Dict = torch.Generator(device=_lowercase ).manual_seed(0 ) __UpperCamelCase : List[str] = pipe( [prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0] __UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] __UpperCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) __UpperCamelCase : Tuple = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self :Any ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple: __UpperCamelCase : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) __UpperCamelCase : Any = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" ) __UpperCamelCase : Optional[Any] = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __UpperCamelCase : Any = torch.Generator(device=_lowercase ).manual_seed(0 ) __UpperCamelCase : Union[str, Any] = pipeline( "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=_lowercase , output_type="np" , ) __UpperCamelCase : Dict = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image ).max() < 2.0
232
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self : Tuple , _lowercase : str , _lowercase : str ): __UpperCAmelCase , __UpperCAmelCase = text, pattern __UpperCAmelCase , __UpperCAmelCase = len(_lowercase ), len(_lowercase ) def a ( self : Optional[int] , _lowercase : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def a ( self : int , _lowercase : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def a ( self : Optional[Any] ): # searches pattern in text and returns index positions __UpperCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): __UpperCAmelCase = self.mismatch_in_text(_lowercase ) if mismatch_index == -1: positions.append(_lowercase ) else: __UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) __UpperCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _lowercase : str = 'ABAABA' _lowercase : Tuple = 'AB' _lowercase : Dict = BoyerMooreSearch(text, pattern) _lowercase : Any = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
332
0
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline _UpperCamelCase = logging.get_logger(__name__) @add_end_docstrings(_lowerCAmelCase ) class __lowercase (_lowerCAmelCase ): def __init__( self , **A_ ) ->Union[str, Any]: '''simple docstring''' super().__init__(**_lowercase ) if self.framework != "pt": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) # No specific FOR_XXX available yet def __call__( self , A_ , **A_ ) ->int: '''simple docstring''' return super().__call__(_lowercase , **_lowercase ) def UpperCamelCase__ ( self , **A_ ) ->str: '''simple docstring''' __lowerCAmelCase : Dict = {} if "candidate_labels" in kwargs: __lowerCAmelCase : Optional[int] = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __lowerCAmelCase : str = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def UpperCamelCase__ ( self , A_ , A_=None , A_="This is a sound of {}." ) ->Tuple: '''simple docstring''' if isinstance(_lowercase , _lowercase ): if audio.startswith('''http://''' ) or audio.startswith('''https://''' ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png __lowerCAmelCase : Tuple = requests.get(_lowercase ).content else: with open(_lowercase , '''rb''' ) as f: __lowerCAmelCase : Optional[int] = f.read() if isinstance(_lowercase , _lowercase ): __lowerCAmelCase : Optional[Any] = ffmpeg_read(_lowercase , self.feature_extractor.sampling_rate ) if not isinstance(_lowercase , np.ndarray ): raise ValueError('''We expect a numpy ndarray as input''' ) if len(audio.shape ) != 1: raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' ) __lowerCAmelCase : List[Any] = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' ) __lowerCAmelCase : List[Any] = candidate_labels __lowerCAmelCase : Union[str, Any] = [hypothesis_template.format(_lowercase ) for x in candidate_labels] __lowerCAmelCase : Optional[Any] = self.tokenizer(_lowercase , return_tensors=self.framework , padding=_lowercase ) __lowerCAmelCase : Dict = [text_inputs] return inputs def UpperCamelCase__ ( self , A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Tuple = model_inputs.pop('''candidate_labels''' ) __lowerCAmelCase : List[str] = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , _lowercase ): __lowerCAmelCase : Dict = text_inputs[0] else: # Batching case. __lowerCAmelCase : Union[str, Any] = text_inputs[0][0] __lowerCAmelCase : Optional[Any] = self.model(**_lowercase , **_lowercase ) __lowerCAmelCase : List[str] = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_audio, } return model_outputs def UpperCamelCase__ ( self , A_ ) ->int: '''simple docstring''' __lowerCAmelCase : List[Any] = model_outputs.pop('''candidate_labels''' ) __lowerCAmelCase : List[Any] = model_outputs['''logits'''][0] if self.framework == "pt": __lowerCAmelCase : Optional[Any] = logits.softmax(dim=0 ) __lowerCAmelCase : Optional[Any] = probs.tolist() else: raise ValueError('''`tf` framework not supported.''' ) __lowerCAmelCase : List[Any] = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(_lowercase , _lowercase ) , key=lambda A_ : -x[0] ) ] return result
275
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _UpperCAmelCase : a__ : int a__ : Node | None = None a__ : Node | None = None def lowercase__ ( ): __UpperCAmelCase = Node(1 ) __UpperCAmelCase = Node(2 ) __UpperCAmelCase = Node(3 ) __UpperCAmelCase = Node(4 ) __UpperCAmelCase = Node(5 ) return tree def lowercase__ ( snake_case_ :Node | None ): return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowercase__ ( snake_case_ :Node | None ): return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowercase__ ( snake_case_ :Node | None ): __UpperCAmelCase = [] if root is None: return output __UpperCAmelCase = deque([root] ) while process_queue: __UpperCAmelCase = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None ): if root is None: return [] __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = height(snake_case_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 1 else: output.append(get_nodes_from_right_to_left(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 0 return output def lowercase__ ( ): # Main function for testing. __UpperCAmelCase = make_tree() print(F'''In-order Traversal: {inorder(snake_case_ )}''' ) print(F'''Pre-order Traversal: {preorder(snake_case_ )}''' ) print(F'''Post-order Traversal: {postorder(snake_case_ )}''' , '''\n''' ) print(F'''Height of Tree: {height(snake_case_ )}''' , '''\n''' ) print('''Complete Level Order Traversal: ''' ) print(level_order(snake_case_ ) , '''\n''' ) print('''Level-wise order Traversal: ''' ) for level in range(1 , height(snake_case_ ) + 1 ): print(F'''Level {level}:''' , get_nodes_from_left_to_right(snake_case_ , level=snake_case_ ) ) print('''\nZigZag order Traversal: ''' ) print(zigzag(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
332
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a_ = { 'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST', 'FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
152
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class _UpperCAmelCase ( unittest.TestCase ): @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) ) @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) )
332
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = XLMRobertaModel.from_pretrained('''xlm-roberta-base''') SCREAMING_SNAKE_CASE_ : str = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house SCREAMING_SNAKE_CASE_ : int = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = model(_lowercase)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1e-3)) @slow def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-large''') SCREAMING_SNAKE_CASE_ : Any = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house SCREAMING_SNAKE_CASE_ : Dict = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim SCREAMING_SNAKE_CASE_ : int = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(_lowercase)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1e-3))
91
"""simple docstring""" def lowercase__ ( snake_case_ :Union[str, Any] ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = max(snake_case_ ) __UpperCAmelCase = min(snake_case_ ) # create the counting array __UpperCAmelCase = coll_max + 1 - coll_min __UpperCAmelCase = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , snake_case_ ): __UpperCAmelCase = counting_arr[i] + counting_arr[i - 1] # create the output collection __UpperCAmelCase = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , snake_case_ ) ): __UpperCAmelCase = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowercase__ ( snake_case_ :str ): return "".join([chr(snake_case_ ) for i in counting_sort([ord(snake_case_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt" _lowercase : int = input('Enter numbers separated by a comma:\n').strip() _lowercase : int = [int(item) for item in user_input.split(',')] print(counting_sort(unsorted))
332
0
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Tuple = [] embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight", F"stage{idx}.patch_embed.proj.weight", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias", F"stage{idx}.patch_embed.proj.bias", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight", F"stage{idx}.patch_embed.norm.weight", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias", F"stage{idx}.patch_embed.norm.bias", ) ) return embed def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Optional[Any] = [] attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight", F"stage{idx}.blocks.{cnt}.attn.proj_q.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias", F"stage{idx}.blocks.{cnt}.attn.proj_q.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight", F"stage{idx}.blocks.{cnt}.attn.proj_k.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias", F"stage{idx}.blocks.{cnt}.attn.proj_k.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight", F"stage{idx}.blocks.{cnt}.attn.proj_v.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias", F"stage{idx}.blocks.{cnt}.attn.proj_v.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight", F"stage{idx}.blocks.{cnt}.attn.proj.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias", F"stage{idx}.blocks.{cnt}.attn.proj.bias", ) ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") ) return attention_weights def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Tuple = [] token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') ) return token def __lowerCAmelCase (): __lowerCAmelCase : List[str] = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = 'imagenet-1k-id2label.json' __lowerCAmelCase : str = 1000 __lowerCAmelCase : int = 'huggingface/label-files' __lowerCAmelCase : List[str] = num_labels __lowerCAmelCase : Dict = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type='dataset' ) ) , 'r' ) ) __lowerCAmelCase : Dict = {int(snake_case_ ): v for k, v in idalabel.items()} __lowerCAmelCase : str = idalabel __lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()} __lowerCAmelCase : Union[str, Any] = CvtConfig(num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": __lowerCAmelCase : str = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": __lowerCAmelCase : List[Any] = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __lowerCAmelCase : str = [2, 2, 20] __lowerCAmelCase : Union[str, Any] = [3, 12, 16] __lowerCAmelCase : str = [192, 768, 1024] __lowerCAmelCase : str = CvtForImageClassification(snake_case_ ) __lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) __lowerCAmelCase : Union[str, Any] = image_size __lowerCAmelCase : Union[str, Any] = torch.load(snake_case_ , map_location=torch.device('cpu' ) ) __lowerCAmelCase : Any = OrderedDict() __lowerCAmelCase : Union[str, Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __lowerCAmelCase : Union[str, Any] = list_of_state_dict + cls_token(snake_case_ ) __lowerCAmelCase : Union[str, Any] = list_of_state_dict + embeddings(snake_case_ ) for cnt in range(config.depth[idx] ): __lowerCAmelCase : str = list_of_state_dict + attention(snake_case_ , snake_case_ ) __lowerCAmelCase : Tuple = list_of_state_dict + final() for gg in list_of_state_dict: print(snake_case_ ) for i in range(len(snake_case_ ) ): __lowerCAmelCase : Optional[Any] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you\'d like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowerCamelCase__ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
86
"""simple docstring""" from collections import defaultdict def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = first_str.lower().strip() __UpperCAmelCase = second_str.lower().strip() # Remove whitespace __UpperCAmelCase = first_str.replace(''' ''' , '''''' ) __UpperCAmelCase = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(snake_case_ ) != len(snake_case_ ): return False # Default values for count should be 0 __UpperCAmelCase = defaultdict(snake_case_ ) # For each character in input strings, # increment count in the corresponding for i in range(len(snake_case_ ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() _lowercase : List[Any] = input('Enter the first string ').strip() _lowercase : Tuple = input('Enter the second string ').strip() _lowercase : str = check_anagrams(input_a, input_b) print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
332
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { 'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json', 'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json', 'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json', 'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json', 'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json', 'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json', 'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json', 'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json', 'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json', } class A ( _lowerCAmelCase ): __UpperCAmelCase : List[Any] = "xmod" def __init__(self : Tuple , __UpperCAmelCase : Optional[Any]=3_0_5_2_2 , __UpperCAmelCase : Optional[Any]=7_6_8 , __UpperCAmelCase : List[str]=1_2 , __UpperCAmelCase : List[Any]=1_2 , __UpperCAmelCase : int=3_0_7_2 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : List[str]=5_1_2 , __UpperCAmelCase : int=2 , __UpperCAmelCase : List[Any]=0.02 , __UpperCAmelCase : int=1E-12 , __UpperCAmelCase : int=1 , __UpperCAmelCase : Any=0 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Optional[Any]="absolute" , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : int=False , __UpperCAmelCase : str=2 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Optional[int]=("en_XX",) , __UpperCAmelCase : List[str]=None , **__UpperCAmelCase : str , ) -> List[str]: """simple docstring""" super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = hidden_act UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = type_vocab_size UpperCAmelCase__ = initializer_range UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = position_embedding_type UpperCAmelCase__ = use_cache UpperCAmelCase__ = classifier_dropout UpperCAmelCase__ = pre_norm UpperCAmelCase__ = adapter_reduction_factor UpperCAmelCase__ = adapter_layer_norm UpperCAmelCase__ = adapter_reuse_layer_norm UpperCAmelCase__ = ln_before_adapter UpperCAmelCase__ = list(_lowercase ) UpperCAmelCase__ = default_language class A ( _lowerCAmelCase ): @property def lowercase_ (self : List[str] ) -> List[str]: """simple docstring""" if self.task == "multiple-choice": UpperCAmelCase__ = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase__ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
65
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict , _lowercase : Union[str, Any] ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __UpperCAmelCase = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowercase ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : List[str] ): __UpperCAmelCase = '''sgugger/tiny-distilbert-classification''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) # set architectures equal to `None` __UpperCAmelCase = None __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Tuple ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Any ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Union[str, Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() ) def a ( self : List[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowercase : str ): self.assertTrue(hasattr(_lowercase , '''sequential''' ) ) self.assertTrue(hasattr(_lowercase , '''cumulative''' ) ) self.assertTrue(hasattr(_lowercase , '''current''' ) ) self.assertTrue(hasattr(_lowercase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
332
0
"""simple docstring""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() a = logging.get_logger('transformers.models.encodec') a = { 'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited', 'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size', 'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed', 'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg', } a = { 'encoder.model.0.conv.conv': 'encoder.layers.0.conv', 'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv', 'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv', 'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv', 'encoder.model.3.conv.conv': 'encoder.layers.3.conv', 'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv', 'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv', 'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv', 'encoder.model.6.conv.conv': 'encoder.layers.6.conv', 'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv', 'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv', 'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv', 'encoder.model.9.conv.conv': 'encoder.layers.9.conv', 'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv', 'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv', 'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv', 'encoder.model.12.conv.conv': 'encoder.layers.12.conv', 'encoder.model.13.lstm': 'encoder.layers.13.lstm', 'encoder.model.15.conv.conv': 'encoder.layers.15.conv', } a = { 'encoder.model.0.conv.norm': 'encoder.layers.0.norm', 'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm', 'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm', 'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm', 'encoder.model.3.conv.norm': 'encoder.layers.3.norm', 'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm', 'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm', 'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm', 'encoder.model.6.conv.norm': 'encoder.layers.6.norm', 'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm', 'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm', 'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm', 'encoder.model.9.conv.norm': 'encoder.layers.9.norm', 'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm', 'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm', 'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm', 'encoder.model.12.conv.norm': 'encoder.layers.12.norm', 'encoder.model.15.conv.norm': 'encoder.layers.15.norm', } a = { 'decoder.model.0.conv.conv': 'decoder.layers.0.conv', 'decoder.model.1.lstm': 'decoder.layers.1.lstm', 'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv', 'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv', 'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv', 'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv', 'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv', 'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv', 'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv', 'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv', 'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv', 'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv', 'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv', 'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv', 'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv', 'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv', 'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv', 'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv', 'decoder.model.15.conv.conv': 'decoder.layers.15.conv', } a = { 'decoder.model.0.conv.norm': 'decoder.layers.0.norm', 'decoder.model.3.convtr.norm': 'decoder.layers.3.norm', 'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm', 'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm', 'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm', 'decoder.model.6.convtr.norm': 'decoder.layers.6.norm', 'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm', 'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm', 'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm', 'decoder.model.9.convtr.norm': 'decoder.layers.9.norm', 'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm', 'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm', 'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm', 'decoder.model.12.convtr.norm': 'decoder.layers.12.norm', 'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm', 'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm', 'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm', 'decoder.model.15.conv.norm': 'decoder.layers.15.norm', } a = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } a = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } a = [] a = [] def lowercase (snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Tuple ) -> int: '''simple docstring''' for attribute in key.split(""".""" ): lowerCAmelCase = getattr(snake_case_ , snake_case_ ) if weight_type is not None: lowerCAmelCase = getattr(snake_case_ , snake_case_ ).shape else: lowerCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCAmelCase = value elif weight_type == "weight_g": lowerCAmelCase = value elif weight_type == "weight_v": lowerCAmelCase = value elif weight_type == "bias": lowerCAmelCase = value elif weight_type == "running_mean": lowerCAmelCase = value elif weight_type == "running_var": lowerCAmelCase = value elif weight_type == "num_batches_tracked": lowerCAmelCase = value elif weight_type == "weight_ih_l0": lowerCAmelCase = value elif weight_type == "weight_hh_l0": lowerCAmelCase = value elif weight_type == "bias_ih_l0": lowerCAmelCase = value elif weight_type == "bias_hh_l0": lowerCAmelCase = value elif weight_type == "weight_ih_l1": lowerCAmelCase = value elif weight_type == "weight_hh_l1": lowerCAmelCase = value elif weight_type == "bias_ih_l1": lowerCAmelCase = value elif weight_type == "bias_hh_l1": lowerCAmelCase = value else: lowerCAmelCase = value logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def lowercase (snake_case__ : Tuple , snake_case__ : str ) -> Dict: '''simple docstring''' for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowerCAmelCase , lowerCAmelCase = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowercase (snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Dict ) -> List[Any]: '''simple docstring''' lowerCAmelCase = [] if model_name == "encodec_24khz" or "encodec_32khz": lowerCAmelCase = MAPPING_24K elif model_name == "encodec_48khz": lowerCAmelCase = MAPPING_48K else: raise ValueError(f'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(snake_case_ , snake_case_ ): logger.info(f'''{name} was ignored''' ) continue lowerCAmelCase = False for key, mapped_key in MAPPING.items(): if "*" in key: lowerCAmelCase , lowerCAmelCase = key.split(""".*.""" ) if prefix in name and suffix in name: lowerCAmelCase = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ): continue lowerCAmelCase = True if "*" in mapped_key: lowerCAmelCase = name.split(snake_case_ )[0].split(""".""" )[-2] lowerCAmelCase = mapped_key.replace("""*""" , snake_case_ ) if "weight_g" in name: lowerCAmelCase = """weight_g""" elif "weight_v" in name: lowerCAmelCase = """weight_v""" elif "weight_ih_l0" in name: lowerCAmelCase = """weight_ih_l0""" elif "weight_hh_l0" in name: lowerCAmelCase = """weight_hh_l0""" elif "bias_ih_l0" in name: lowerCAmelCase = """bias_ih_l0""" elif "bias_hh_l0" in name: lowerCAmelCase = """bias_hh_l0""" elif "weight_ih_l1" in name: lowerCAmelCase = """weight_ih_l1""" elif "weight_hh_l1" in name: lowerCAmelCase = """weight_hh_l1""" elif "bias_ih_l1" in name: lowerCAmelCase = """bias_ih_l1""" elif "bias_hh_l1" in name: lowerCAmelCase = """bias_hh_l1""" elif "bias" in name: lowerCAmelCase = """bias""" elif "weight" in name: lowerCAmelCase = """weight""" elif "running_mean" in name: lowerCAmelCase = """running_mean""" elif "running_var" in name: lowerCAmelCase = """running_var""" elif "num_batches_tracked" in name: lowerCAmelCase = """num_batches_tracked""" else: lowerCAmelCase = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(f'''Unused weights: {unused_weights}''' ) @torch.no_grad() def lowercase (snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple=None , snake_case__ : int=None , ) -> Optional[int]: '''simple docstring''' if config_path is not None: lowerCAmelCase = EncodecConfig.from_pretrained(snake_case_ ) else: lowerCAmelCase = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": lowerCAmelCase = [8, 5, 4, 4] lowerCAmelCase = [2.2] lowerCAmelCase = 64 lowerCAmelCase = 32_000 lowerCAmelCase = 2_048 lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False elif model_name == "encodec_48khz": lowerCAmelCase = [8, 5, 4, 2] lowerCAmelCase = [3.0, 6.0, 12.0, 24.0] lowerCAmelCase = 48_000 lowerCAmelCase = 2 lowerCAmelCase = False lowerCAmelCase = """time_group_norm""" lowerCAmelCase = True lowerCAmelCase = 1.0 lowerCAmelCase = 0.01 else: raise ValueError(f'''Unknown model name: {model_name}''' ) lowerCAmelCase = EncodecModel(snake_case_ ) lowerCAmelCase = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(snake_case_ ) lowerCAmelCase = torch.load(snake_case_ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights lowerCAmelCase = original_checkpoint["""best_state"""] recursively_load_weights(snake_case_ , snake_case_ , snake_case_ ) model.save_pretrained(snake_case_ ) if repo_id: print("""Pushing to the hub...""" ) feature_extractor.push_to_hub(snake_case_ ) model.push_to_hub(snake_case_ ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument( '--model', default='encodec_24khz', type=str, help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) a = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
155
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _UpperCAmelCase ( _lowerCAmelCase ): def a ( self : Tuple , _lowercase : Dict=None , _lowercase : str=None , _lowercase : Union[str, Any]=None , **_lowercase : Tuple ): if tokenize_kwargs is None: __UpperCAmelCase = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __UpperCAmelCase = truncation __UpperCAmelCase = tokenize_kwargs __UpperCAmelCase = {} if return_tensors is not None: __UpperCAmelCase = return_tensors return preprocess_params, {}, postprocess_params def a ( self : int , _lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): __UpperCAmelCase = self.framework __UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase ) return model_inputs def a ( self : List[str] , _lowercase : Tuple ): __UpperCAmelCase = self.model(**_lowercase ) return model_outputs def a ( self : int , _lowercase : Tuple , _lowercase : str=False ): # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[Any] , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): return super().__call__(*_lowercase , **_lowercase )
332
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
220
"""simple docstring""" from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor _lowercase : Union[str, Any] = transforms.Compose( [ transforms.Resize((2_56, 2_56)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowercase__ ( snake_case_ :List[Any] ): if isinstance(snake_case_ , torch.Tensor ): return image elif isinstance(snake_case_ , PIL.Image.Image ): __UpperCAmelCase = [image] __UpperCAmelCase = [trans(img.convert('''RGB''' ) ) for img in image] __UpperCAmelCase = torch.stack(snake_case_ ) return image class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Any , _lowercase : str , _lowercase : str ): super().__init__() # make sure scheduler can always be converted to DDIM __UpperCAmelCase = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_lowercase , scheduler=_lowercase ) def a ( self : int , _lowercase : List[str] ): if strength < 0 or strength > 1: raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def a ( self : List[Any] , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : int ): # get the original timestep using init_timestep __UpperCAmelCase = min(int(num_inference_steps * strength ) , _lowercase ) __UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) __UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def a ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : Optional[int]=None ): if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}''' ) __UpperCAmelCase = image.to(device=_lowercase , dtype=_lowercase ) if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) __UpperCAmelCase = init_latents.shape __UpperCAmelCase = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase ) # get latents print('''add noise to latents at timestep''' , _lowercase ) __UpperCAmelCase = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = init_latents return latents @torch.no_grad() def __call__( self : Any , _lowercase : Union[torch.FloatTensor, PIL.Image.Image] = None , _lowercase : float = 0.8 , _lowercase : int = 1 , _lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase : float = 0.0 , _lowercase : int = 50 , _lowercase : Optional[bool] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , ): self.check_inputs(_lowercase ) # 2. Preprocess image __UpperCAmelCase = preprocess(_lowercase ) # 3. set timesteps self.scheduler.set_timesteps(_lowercase , device=self.device ) __UpperCAmelCase , __UpperCAmelCase = self.get_timesteps(_lowercase , _lowercase , self.device ) __UpperCAmelCase = timesteps[:1].repeat(_lowercase ) # 4. Prepare latent variables __UpperCAmelCase = self.prepare_latents(_lowercase , _lowercase , _lowercase , self.unet.dtype , self.device , _lowercase ) __UpperCAmelCase = latents # 5. Denoising loop for t in self.progress_bar(_lowercase ): # 1. predict noise model_output __UpperCAmelCase = self.unet(_lowercase , _lowercase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __UpperCAmelCase = self.scheduler.step( _lowercase , _lowercase , _lowercase , eta=_lowercase , use_clipped_model_output=_lowercase , generator=_lowercase , ).prev_sample __UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __UpperCAmelCase = self.numpy_to_pil(_lowercase ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=_lowercase )
332
0
import math def snake_case_ ( snake_case , snake_case ) -> str: lowercase__: str = len(snake_case_ ) lowercase__: List[Any] = int(math.floor(math.sqrt(snake_case_ ) ) ) lowercase__: str = 0 while arr[min(snake_case_ , snake_case_ ) - 1] < x: lowercase__: str = step step += int(math.floor(math.sqrt(snake_case_ ) ) ) if prev >= n: return -1 while arr[prev] < x: lowercase__: Dict = prev + 1 if prev == min(snake_case_ , snake_case_ ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": __lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip() __lowerCAmelCase = [int(item) for item in user_input.split(''',''')] __lowerCAmelCase = int(input('''Enter the number to be searched:\n''')) __lowerCAmelCase = jump_search(arr, x) if res == -1: print('''Number not found!''') else: print(F'''Number {x} is at index {res}''')
196
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowercase : Union[str, Any] = { 'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ 'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel', 'ResNetBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ 'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = [ 'FlaxResNetForImageClassification', 'FlaxResNetModel', 'FlaxResNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys _lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
332
0
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin __a :Tuple = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __a :List[str] = 25_0004 __a :int = 25_0020 @require_sentencepiece @require_tokenizers class _a ( _lowerCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = MBartaaTokenizer _lowerCamelCase : List[str] = MBartaaTokenizerFast _lowerCamelCase : Any = True _lowerCamelCase : List[str] = True def __A ( self : str ): super().setUp() # We have a SentencePiece fixture for testing A_ = MBartaaTokenizer(_lowercase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self : Dict ): A_ = "<s>" A_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def __A ( self : Optional[Any] ): A_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(_lowercase ) , 1054 ) def __A ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def __A ( self : str ): A_ = MBartaaTokenizer(_lowercase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=_lowercase ) A_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(_lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) A_ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) A_ = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A_ = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def __A ( self : str ): # fmt: off A_ = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def __A ( self : str ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return A_ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) A_ = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) A_ = tempfile.mkdtemp() A_ = tokenizer_r.save_pretrained(_lowercase ) A_ = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) A_ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way A_ = tokenizer_r.from_pretrained(_lowercase ) A_ = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=True A_ = tempfile.mkdtemp() A_ = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) A_ = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way A_ = tokenizer_r.from_pretrained(_lowercase ) A_ = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=False A_ = tempfile.mkdtemp() A_ = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) A_ = tokenizer_p.save_pretrained(_lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A_ = tokenizer_r.from_pretrained(_lowercase ) A_ = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) @require_torch @require_sentencepiece @require_tokenizers class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : str = "facebook/mbart-large-50-one-to-many-mmt" _lowerCamelCase : Union[str, Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] _lowerCamelCase : Any = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] _lowerCamelCase : Any = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def __A ( cls : Tuple ): A_ = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) A_ = 1 return cls def __A ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 ) def __A ( self : Union[str, Any] ): A_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) def __A ( self : Optional[Any] ): self.assertIn(_lowercase , self.tokenizer.all_special_ids ) A_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] A_ = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) A_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase ) self.assertEqual(_lowercase , _lowercase ) self.assertNotIn(self.tokenizer.eos_token , _lowercase ) def __A ( self : Optional[Any] ): A_ = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , _lowercase ) A_ = 10 A_ = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0] self.assertEqual(ids[0] , _lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_lowercase ) , _lowercase ) def __A ( self : Optional[int] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] ) def __A ( self : Union[str, Any] ): A_ = tempfile.mkdtemp() A_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowercase ) A_ = MBartaaTokenizer.from_pretrained(_lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase ) @require_torch def __A ( self : Dict ): A_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors="pt" ) A_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def __A ( self : Union[str, Any] ): A_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) A_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) A_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def __A ( self : Union[str, Any] ): A_ = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors="pt" ) A_ = self.tokenizer( text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors="pt" ) A_ = targets["input_ids"] A_ = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __A ( self : Dict ): A_ = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(_lowercase ) , { # en_XX, A, test, EOS "input_ids": [[250004, 62, 3034, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
312
"""simple docstring""" _lowercase : Any = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowercase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowercase : int = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
332
0
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) __a = pytest.mark.integration @pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Dict: inspect_dataset(snake_case_ , snake_case_ ) snake_case__ : Optional[int] = path + """.py""" assert script_name in os.listdir(snake_case_ ) assert "__pycache__" not in os.listdir(snake_case_ ) @pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.parametrize("""path""" , ["""accuracy"""] ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: inspect_metric(snake_case_ , snake_case_ ) snake_case__ : Any = path + """.py""" assert script_name in os.listdir(snake_case_ ) assert "__pycache__" not in os.listdir(snake_case_ ) @pytest.mark.parametrize( """path, config_name, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: snake_case__ : str = get_dataset_config_info(snake_case_ , config_name=snake_case_ ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: with pytest.raises(snake_case_ ): get_dataset_config_info(snake_case_ , config_name=snake_case_ ) @pytest.mark.parametrize( """path, expected""" , [ ("""squad""", """plain_text"""), ("""acronym_identification""", """default"""), ("""lhoestq/squad""", """plain_text"""), ("""lhoestq/test""", """default"""), ("""lhoestq/demo1""", """lhoestq--demo1"""), ("""dalle-mini/wit""", """dalle-mini--wit"""), ] , ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: snake_case__ : List[Any] = get_dataset_config_names(snake_case_ ) assert expected in config_names @pytest.mark.parametrize( """path, expected_configs, expected_splits_in_first_config""" , [ ("""squad""", ["""plain_text"""], ["""train""", """validation"""]), ("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]), ("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]), ] , ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: snake_case__ : int = get_dataset_infos(snake_case_ ) assert list(infos.keys() ) == expected_configs snake_case__ : str = expected_configs[0] assert expected_config in infos snake_case__ : Optional[Any] = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( """path, expected_config, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: snake_case__ : str = get_dataset_infos(snake_case_ ) assert expected_config in infos snake_case__ : int = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: with pytest.raises(snake_case_ ): get_dataset_split_names(snake_case_ , config_name=snake_case_ )
35
"""simple docstring""" import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def lowercase__ ( snake_case_ :Optional[int] ): return EnvironmentCommand() def lowercase__ ( snake_case_ :List[str] ): return EnvironmentCommand(args.accelerate_config_file ) class _UpperCAmelCase ( _lowerCAmelCase ): @staticmethod def a ( _lowercase : ArgumentParser ): __UpperCAmelCase = parser.add_parser('''env''' ) download_parser.set_defaults(func=_lowercase ) download_parser.add_argument( '''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=_lowercase ) def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ): __UpperCAmelCase = accelerate_config_file def a ( self : Dict ): __UpperCAmelCase = '''not installed''' if is_safetensors_available(): import safetensors __UpperCAmelCase = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors __UpperCAmelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = __UpperCAmelCase = '''not found''' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __UpperCAmelCase = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(_lowercase ): __UpperCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict() __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(_lowercase , _lowercase ) else F'''\t{accelerate_config}''' ) __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_torch_available(): import torch __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_tf_available(): import tensorflow as tf __UpperCAmelCase = tf.__version__ try: # deprecated in v2.1 __UpperCAmelCase = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __UpperCAmelCase = bool(tf.config.list_physical_devices('''GPU''' ) ) __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''not installed''' __UpperCAmelCase = '''NA''' if is_flax_available(): import flax import jax import jaxlib __UpperCAmelCase = flax.__version__ __UpperCAmelCase = jax.__version__ __UpperCAmelCase = jaxlib.__version__ __UpperCAmelCase = jax.lib.xla_bridge.get_backend().platform __UpperCAmelCase = { '''`transformers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Huggingface_hub version''': huggingface_hub.__version__, '''Safetensors version''': F'''{safetensors_version}''', '''Accelerate version''': F'''{accelerate_version}''', '''Accelerate config''': F'''{accelerate_config_str}''', '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''', '''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''', '''Jax version''': F'''{jax_version}''', '''JaxLib version''': F'''{jaxlib_version}''', '''Using GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(_lowercase ) ) return info @staticmethod def a ( _lowercase : str ): return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
332
0
import os def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]: '''simple docstring''' with open(os.path.dirname(snake_case_) + "/p022_names.txt") as file: __UpperCamelCase : Union[str, Any] = str(file.readlines()[0]) __UpperCamelCase : Any = names.replace("\"" , "").split(",") names.sort() __UpperCamelCase : Dict = 0 __UpperCamelCase : Optional[int] = 0 for i, name in enumerate(snake_case_): for letter in name: name_score += ord(snake_case_) - 64 total_score += (i + 1) * name_score __UpperCamelCase : Dict = 0 return total_score if __name__ == "__main__": print(solution())
232
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list[float] , snake_case_ :list[float] ): __UpperCAmelCase = sorted(numsa + numsa ) __UpperCAmelCase , __UpperCAmelCase = divmod(len(snake_case_ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowercase : int = [float(x) for x in input('Enter the elements of first array: ').split()] _lowercase : Tuple = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
332
0
import argparse import copy def _lowercase ( lowercase__ ): __lowerCAmelCase : Dict = {} with open(snake_case_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __lowerCAmelCase : str = [] _list.append([line.split()[1], line.split()[2]] ) __lowerCAmelCase : List[str] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __lowerCAmelCase : Optional[int] = [] _list.append([line.split()[0], line.split()[2]] ) __lowerCAmelCase : Dict = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _lowercase ( lowercase__ , lowercase__ ): with open(snake_case_ ) as f: __lowerCAmelCase : Tuple = f.read(1 ) __lowerCAmelCase : int = start_node __lowerCAmelCase : List[str] = [] __lowerCAmelCase : Tuple = start_node __lowerCAmelCase : List[Any] = 0 while visiting not in first_solution: __lowerCAmelCase : Dict = 1_0_0_0_0 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution: __lowerCAmelCase : Optional[Any] = k[1] __lowerCAmelCase : Optional[Any] = k[0] first_solution.append(snake_case_ ) __lowerCAmelCase : List[Any] = distance_of_first_solution + int(snake_case_ ) __lowerCAmelCase : List[Any] = best_node first_solution.append(snake_case_ ) __lowerCAmelCase : List[str] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __lowerCAmelCase : List[Any] = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0_0_0_0 ) return first_solution, distance_of_first_solution def _lowercase ( lowercase__ , lowercase__ ): __lowerCAmelCase : str = [] for n in solution[1:-1]: __lowerCAmelCase : str = solution.index(snake_case_ ) for kn in solution[1:-1]: __lowerCAmelCase : int = solution.index(snake_case_ ) if n == kn: continue __lowerCAmelCase : Dict = copy.deepcopy(snake_case_ ) __lowerCAmelCase : Optional[int] = kn __lowerCAmelCase : Optional[Any] = n __lowerCAmelCase : Union[str, Any] = 0 for k in _tmp[:-1]: __lowerCAmelCase : Optional[Any] = _tmp[_tmp.index(snake_case_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __lowerCAmelCase : Tuple = distance + int(i[1] ) _tmp.append(snake_case_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __lowerCAmelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda lowercase__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : int = 1 __lowerCAmelCase : Optional[Any] = first_solution __lowerCAmelCase : Any = [] __lowerCAmelCase : List[str] = distance_of_first_solution __lowerCAmelCase : str = solution while count <= iters: __lowerCAmelCase : Optional[int] = find_neighborhood(snake_case_ , snake_case_ ) __lowerCAmelCase : Optional[int] = 0 __lowerCAmelCase : Union[str, Any] = neighborhood[index_of_best_solution] __lowerCAmelCase : List[Any] = len(snake_case_ ) - 1 __lowerCAmelCase : Union[str, Any] = False while not found: __lowerCAmelCase : Any = 0 while i < len(snake_case_ ): if best_solution[i] != solution[i]: __lowerCAmelCase : Optional[int] = best_solution[i] __lowerCAmelCase : Dict = solution[i] break __lowerCAmelCase : List[Any] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __lowerCAmelCase : List[str] = True __lowerCAmelCase : Union[str, Any] = best_solution[:-1] __lowerCAmelCase : Optional[int] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __lowerCAmelCase : Optional[int] = cost __lowerCAmelCase : Dict = solution else: __lowerCAmelCase : Any = index_of_best_solution + 1 __lowerCAmelCase : Optional[int] = neighborhood[index_of_best_solution] if len(snake_case_ ) >= size: tabu_list.pop(0 ) __lowerCAmelCase : Union[str, Any] = count + 1 return best_solution_ever, best_cost def _lowercase ( lowercase__=None ): __lowerCAmelCase : int = generate_neighbours(args.File ) __lowerCAmelCase, __lowerCAmelCase : Tuple = generate_first_solution( args.File , snake_case_ ) __lowerCAmelCase, __lowerCAmelCase : List[Any] = tabu_search( snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , ) print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
275
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class _UpperCAmelCase : def __init__( self : Union[str, Any] , _lowercase : Optional[Any] ): __UpperCAmelCase = str(id_ ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = [] __UpperCAmelCase = {} # {vertex:distance} def __lt__( self : str , _lowercase : List[Any] ): return self.key < other.key def __repr__( self : int ): return self.id def a ( self : Union[str, Any] , _lowercase : int ): self.neighbors.append(_lowercase ) def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : int ): __UpperCAmelCase = weight def lowercase__ ( snake_case_ :int , snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , snake_case_ ) graph[b - 1].add_edge(graph[a - 1] , snake_case_ ) def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ): __UpperCAmelCase = [] for u in graph: __UpperCAmelCase = math.inf __UpperCAmelCase = None __UpperCAmelCase = 0 __UpperCAmelCase = graph[:] while q: __UpperCAmelCase = min(snake_case_ ) q.remove(snake_case_ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): __UpperCAmelCase = u __UpperCAmelCase = u.edges[v.id] for i in range(1 , len(snake_case_ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ): for u in graph: __UpperCAmelCase = math.inf __UpperCAmelCase = None __UpperCAmelCase = 0 __UpperCAmelCase = list(snake_case_ ) hq.heapify(snake_case_ ) while h: __UpperCAmelCase = hq.heappop(snake_case_ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): __UpperCAmelCase = u __UpperCAmelCase = u.edges[v.id] hq.heapify(snake_case_ ) for i in range(1 , len(snake_case_ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def lowercase__ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
332
0
'''simple docstring''' def _a( UpperCamelCase__ : float, UpperCamelCase__ : float ): '''simple docstring''' if mass < 0: raise ValueError('''The mass of a body cannot be negative''' ) return 0.5 * mass * abs(snake_case_ ) * abs(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
152
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : str = logging.get_logger(__name__) _lowercase : Dict = { 'microsoft/swinv2-tiny-patch4-window8-256': ( 'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "swinv2" a__ : List[Any] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Any , _lowercase : List[Any]=2_24 , _lowercase : int=4 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=96 , _lowercase : Optional[int]=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : str=7 , _lowercase : Union[str, Any]=4.0 , _lowercase : List[str]=True , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=False , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-5 , _lowercase : Tuple=32 , **_lowercase : Optional[int] , ): super().__init__(**_lowercase ) __UpperCAmelCase = image_size __UpperCAmelCase = patch_size __UpperCAmelCase = num_channels __UpperCAmelCase = embed_dim __UpperCAmelCase = depths __UpperCAmelCase = len(_lowercase ) __UpperCAmelCase = num_heads __UpperCAmelCase = window_size __UpperCAmelCase = mlp_ratio __UpperCAmelCase = qkv_bias __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = drop_path_rate __UpperCAmelCase = hidden_act __UpperCAmelCase = use_absolute_embeddings __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = initializer_range __UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) ) __UpperCAmelCase = (0, 0, 0, 0)
332
0
"""simple docstring""" def _A (__a ) -> Union[str, Any]: """simple docstring""" if collection == []: return [] # get some information about the collection SCREAMING_SNAKE_CASE_ : List[Any] = len(snake_case_ ) SCREAMING_SNAKE_CASE_ : str = max(snake_case_ ) SCREAMING_SNAKE_CASE_ : Any = min(snake_case_ ) # create the counting array SCREAMING_SNAKE_CASE_ : Union[str, Any] = coll_max + 1 - coll_min SCREAMING_SNAKE_CASE_ : List[str] = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , snake_case_ ): SCREAMING_SNAKE_CASE_ : Optional[int] = counting_arr[i] + counting_arr[i - 1] # create the output collection SCREAMING_SNAKE_CASE_ : Dict = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , snake_case_ ) ): SCREAMING_SNAKE_CASE_ : List[Any] = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def _A (__a ) -> List[str]: """simple docstring""" return "".join([chr(snake_case_ ) for i in counting_sort([ord(snake_case_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt" UpperCAmelCase_ : int = input("""Enter numbers separated by a comma:\n""").strip() UpperCAmelCase_ : int = [int(item) for item in user_input.split(""",""")] print(counting_sort(unsorted))
91
"""simple docstring""" import pprint import requests _lowercase : Optional[Any] = 'https://zenquotes.io/api' def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def lowercase__ ( ): return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": _lowercase : int = random_quotes() pprint.pprint(response)
332
0
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Dict = WavaVecaForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ ) __lowerCAmelCase : List[str] = downstream_dict['projector.weight'] __lowerCAmelCase : Union[str, Any] = downstream_dict['projector.bias'] __lowerCAmelCase : Union[str, Any] = downstream_dict['model.post_net.linear.weight'] __lowerCAmelCase : Tuple = downstream_dict['model.post_net.linear.bias'] return model def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Optional[int] = WavaVecaForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ ) __lowerCAmelCase : List[str] = downstream_dict['model.linear.weight'] __lowerCAmelCase : int = downstream_dict['model.linear.bias'] return model def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Dict = WavaVecaForXVector.from_pretrained(snake_case_ , config=snake_case_ ) __lowerCAmelCase : Dict = downstream_dict['connector.weight'] __lowerCAmelCase : List[Any] = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __lowerCAmelCase : Dict = downstream_dict[ F"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] __lowerCAmelCase : Any = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"] __lowerCAmelCase : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] __lowerCAmelCase : Any = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] __lowerCAmelCase : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] __lowerCAmelCase : str = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] __lowerCAmelCase : Optional[Any] = downstream_dict['objective.W'] return model @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Dict = torch.load(snake_case_ , map_location='cpu' ) __lowerCAmelCase : int = checkpoint['Downstream'] __lowerCAmelCase : List[str] = WavaVecaConfig.from_pretrained(snake_case_ ) __lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained( snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ ) __lowerCAmelCase : int = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): __lowerCAmelCase : Any = convert_classification(snake_case_ , snake_case_ , snake_case_ ) elif arch.endswith('ForAudioFrameClassification' ): __lowerCAmelCase : str = convert_diarization(snake_case_ , snake_case_ , snake_case_ ) elif arch.endswith('ForXVector' ): __lowerCAmelCase : Optional[Any] = convert_xvector(snake_case_ , snake_case_ , snake_case_ ) else: raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" ) if hf_config.use_weighted_layer_sum: __lowerCAmelCase : Optional[int] = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(snake_case_ ) hf_model.save_pretrained(snake_case_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model.""" ) parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""") parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""") lowerCamelCase__ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
86
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[str] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :Union[tf.Tensor, np.ndarray] ): if isinstance(snake_case_ , np.ndarray ): return list(tensor.shape ) __UpperCAmelCase = tf.shape(snake_case_ ) if tensor.shape == tf.TensorShape(snake_case_ ): return dynamic __UpperCAmelCase = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(snake_case_ )] def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :Optional[int] = None , snake_case_ :Optional[str] = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=snake_case_ , name=snake_case_ ) def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :Union[str, Any]=1E-5 , snake_case_ :List[str]=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case_ , snake_case_ ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized __UpperCAmelCase , __UpperCAmelCase = tf.nn.moments(snake_case_ , axes=[axis] , keepdims=snake_case_ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __UpperCAmelCase = [1] * inputs.shape.rank __UpperCAmelCase = shape_list(snake_case_ )[axis] __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) # Compute layer normalization using the batch_normalization # function. __UpperCAmelCase = tf.nn.batch_normalization( snake_case_ , snake_case_ , snake_case_ , offset=snake_case_ , scale=snake_case_ , variance_epsilon=snake_case_ , ) return outputs def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :List[str]=0 , snake_case_ :Optional[Any]=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __UpperCAmelCase = tf.shape(snake_case_ ) __UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :tf.Tensor ): if not isinstance(snake_case_ , tf.Tensor ): __UpperCAmelCase = tf.convert_to_tensor(snake_case_ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __UpperCAmelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __UpperCAmelCase = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __UpperCAmelCase = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :int , snake_case_ :str = "input_ids" ): tf.debugging.assert_less( snake_case_ , tf.cast(snake_case_ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case_ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any] , snake_case_ :List[str] ): __UpperCAmelCase = 64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __UpperCAmelCase = [x for x in data if len(snake_case_ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) __UpperCAmelCase = np.asarray(snake_case_ ) __UpperCAmelCase = 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(snake_case_ ): __UpperCAmelCase = chunk_data else: __UpperCAmelCase = data def lowercase__ ( snake_case_ :str , snake_case_ :List[str] ): if name in group.attrs: __UpperCAmelCase = [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs[name]] else: __UpperCAmelCase = [] __UpperCAmelCase = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase__ ( snake_case_ :Tuple ): def _expand_single_ad_tensor(snake_case_ :Optional[int] ): if isinstance(snake_case_ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(snake_case_ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , snake_case_ )
332
0
import os from pathlib import Path def lowerCAmelCase_ ( ) -> Optional[int]: '''simple docstring''' from torch.utils.cpp_extension import load UpperCAmelCase__ = Path(snake_case_ ).resolve().parent.parent.parent / "kernels" / "deformable_detr" UpperCAmelCase__ = [ root / filename for filename in [ "vision.cpp", os.path.join("cpu", "ms_deform_attn_cpu.cpp" ), os.path.join("cuda", "ms_deform_attn_cuda.cu" ), ] ] load( "MultiScaleDeformableAttention", snake_case_, with_cuda=snake_case_, extra_include_paths=[str(snake_case_ )], extra_cflags=["-DWITH_CUDA=1"], extra_cuda_cflags=[ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ], ) import MultiScaleDeformableAttention as MSDA return MSDA
65
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowercase__ ( snake_case_ :Union[str, Any]=None ): if subparsers is not None: __UpperCAmelCase = subparsers.add_parser('''env''' ) else: __UpperCAmelCase = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=snake_case_ , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=snake_case_ ) return parser def lowercase__ ( snake_case_ :List[Any] ): __UpperCAmelCase = torch.__version__ __UpperCAmelCase = torch.cuda.is_available() __UpperCAmelCase = is_xpu_available() __UpperCAmelCase = is_npu_available() __UpperCAmelCase = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(snake_case_ ): __UpperCAmelCase = load_config_from_file(args.config_file ).to_dict() __UpperCAmelCase = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''PyTorch XPU available''': str(snake_case_ ), '''PyTorch NPU available''': str(snake_case_ ), '''System RAM''': F'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''', } if pt_cuda_available: __UpperCAmelCase = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) __UpperCAmelCase = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(snake_case_ , snake_case_ ) else F'''\t{accelerate_config}''' ) print(snake_case_ ) __UpperCAmelCase = accelerate_config return info def lowercase__ ( ): __UpperCAmelCase = env_command_parser() __UpperCAmelCase = parser.parse_args() env_command(snake_case_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
332
0
"""simple docstring""" from sklearn.metrics import mean_squared_error import datasets a = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' a = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n' a = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def __lowercase ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html""" ] , ) def __lowercase ( self : Optional[Any] ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("""float""" ) ), "references": datasets.Sequence(datasets.Value("""float""" ) ), } else: return { "predictions": datasets.Value("""float""" ), "references": datasets.Value("""float""" ), } def __lowercase ( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]="uniform_average" , lowerCAmelCase : Tuple=True ): lowerCAmelCase = mean_squared_error( _lowercase , _lowercase , sample_weight=_lowercase , multioutput=_lowercase , squared=_lowercase ) return {"mse": mse}
155
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 25_00_04 _lowercase : int = 25_00_20 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : Union[str, Any] = MBartaaTokenizer a__ : List[str] = MBartaaTokenizerFast a__ : Any = True a__ : List[str] = True def a ( self : str ): super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self : Dict ): __UpperCAmelCase = '''<s>''' __UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowercase ) , 10_54 ) def a ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def a ( self : str ): __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) __UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) __UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def a ( self : str ): # fmt: off __UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def a ( self : str ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=True __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=False __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): a__ : str = "facebook/mbart-large-50-one-to-many-mmt" a__ : Union[str, Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] a__ : Any = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] a__ : Any = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def a ( cls : Tuple ): __UpperCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) __UpperCAmelCase = 1 return cls def a ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) def a ( self : Optional[Any] ): self.assertIn(_lowercase , self.tokenizer.all_special_ids ) __UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) __UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase ) self.assertEqual(_lowercase , _lowercase ) self.assertNotIn(self.tokenizer.eos_token , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , _lowercase ) __UpperCAmelCase = 10 __UpperCAmelCase = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0] self.assertEqual(ids[0] , _lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_lowercase ) , _lowercase ) def a ( self : Optional[int] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowercase ) __UpperCAmelCase = MBartaaTokenizer.from_pretrained(_lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors='''pt''' ) __UpperCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors='''pt''' ) __UpperCAmelCase = targets['''input_ids'''] __UpperCAmelCase = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(_lowercase ) , { # en_XX, A, test, EOS '''input_ids''': [[25_00_04, 62, 30_34, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
332
0
"""simple docstring""" import operator def _SCREAMING_SNAKE_CASE ( __snake_case : list , __snake_case : bool = False , __snake_case : list | None = None ): '''simple docstring''' lowercase = operator.lt if reverse else operator.gt lowercase = solution or [] if not arr: return solution lowercase = [arr.pop(0 )] for i, item in enumerate(snake_case_ ): if _operator(snake_case_ , sublist[-1] ): sublist.append(snake_case_ ) arr.pop(snake_case_ ) # merging sublist into solution list if not solution: solution.extend(snake_case_ ) else: while sublist: lowercase = sublist.pop(0 ) for i, xx in enumerate(snake_case_ ): if not _operator(snake_case_ , snake_case_ ): solution.insert(snake_case_ , snake_case_ ) break else: solution.append(snake_case_ ) strand_sort(snake_case_ , snake_case_ , snake_case_ ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
220
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase__ ( ): raise RuntimeError('''CUDA out of memory.''' ) class _UpperCAmelCase ( nn.Module ): def __init__( self : Optional[Any] ): super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def a ( self : Optional[int] , _lowercase : Optional[Any] ): return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) ) class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[int] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) def a ( self : Optional[int] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : str , _lowercase : List[str] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def a ( self : Tuple ): @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(_lowercase : Optional[int] ): pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : List[Any] ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : List[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : Union[str, Any] ): @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : str ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_lowercase ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def a ( self : Dict ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : int ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def a ( self : str ): __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _lowercase ) __UpperCAmelCase = release_memory(_lowercase ) self.assertEqual(torch.cuda.memory_allocated() , _lowercase )
332
0
from pathlib import Path import fire def snake_case_ ( snake_case , snake_case , snake_case ) -> Dict: lowercase__: Union[str, Any] = Path(snake_case_ ) lowercase__: str = Path(snake_case_ ) dest_dir.mkdir(exist_ok=snake_case_ ) for path in src_dir.iterdir(): lowercase__: Tuple = [x.rstrip() for x in list(path.open().readlines() )][:n] lowercase__: Any = dest_dir.joinpath(path.name ) print(snake_case_ ) dest_path.open('w' ).write('\n'.join(snake_case_ ) ) if __name__ == "__main__": fire.Fire(minify)
196
"""simple docstring""" import argparse import copy def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = {} with open(snake_case_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[1], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[0], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ): with open(snake_case_ ) as f: __UpperCAmelCase = f.read(1 ) __UpperCAmelCase = start_node __UpperCAmelCase = [] __UpperCAmelCase = start_node __UpperCAmelCase = 0 while visiting not in first_solution: __UpperCAmelCase = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution: __UpperCAmelCase = k[1] __UpperCAmelCase = k[0] first_solution.append(snake_case_ ) __UpperCAmelCase = distance_of_first_solution + int(snake_case_ ) __UpperCAmelCase = best_node first_solution.append(snake_case_ ) __UpperCAmelCase = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __UpperCAmelCase = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ): __UpperCAmelCase = [] for n in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) for kn in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) if n == kn: continue __UpperCAmelCase = copy.deepcopy(snake_case_ ) __UpperCAmelCase = kn __UpperCAmelCase = n __UpperCAmelCase = 0 for k in _tmp[:-1]: __UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __UpperCAmelCase = distance + int(i[1] ) _tmp.append(snake_case_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ): __UpperCAmelCase = 1 __UpperCAmelCase = first_solution __UpperCAmelCase = [] __UpperCAmelCase = distance_of_first_solution __UpperCAmelCase = solution while count <= iters: __UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = neighborhood[index_of_best_solution] __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = False while not found: __UpperCAmelCase = 0 while i < len(snake_case_ ): if best_solution[i] != solution[i]: __UpperCAmelCase = best_solution[i] __UpperCAmelCase = solution[i] break __UpperCAmelCase = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __UpperCAmelCase = True __UpperCAmelCase = best_solution[:-1] __UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __UpperCAmelCase = cost __UpperCAmelCase = solution else: __UpperCAmelCase = index_of_best_solution + 1 __UpperCAmelCase = neighborhood[index_of_best_solution] if len(snake_case_ ) >= size: tabu_list.pop(0 ) __UpperCAmelCase = count + 1 return best_solution_ever, best_cost def lowercase__ ( snake_case_ :str=None ): __UpperCAmelCase = generate_neighbours(args.File ) __UpperCAmelCase , __UpperCAmelCase = generate_first_solution( args.File , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = tabu_search( snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
332
0
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __A ( self : Optional[int] ): A_ = 1 A_ = 3 A_ = (32, 32) A_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase ) return image @property def __A ( self : List[str] ): torch.manual_seed(0 ) A_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def __A ( self : List[str] ): torch.manual_seed(0 ) A_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def __A ( self : Dict ): torch.manual_seed(0 ) A_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_lowercase ) @property def __A ( self : List[str] ): def extract(*UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Any ): class _a : """simple docstring""" def __init__( self : Optional[int] ): A_ = torch.ones([0] ) def __A ( self : Optional[int] , UpperCAmelCase : List[Any] ): self.pixel_values.to(_lowercase ) return self return Out() return extract def __A ( self : Dict ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.dummy_cond_unet A_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) A_ = self.dummy_vae A_ = self.dummy_text_encoder A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk A_ = StableDiffusionPipeline( unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , ) A_ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) A_ = "A painting of a squirrel eating a burger" A_ = torch.Generator(device=_lowercase ).manual_seed(0 ) A_ = sd_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) A_ = output.images A_ = torch.Generator(device=_lowercase ).manual_seed(0 ) A_ = sd_pipe( [prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_lowercase , )[0] A_ = image[0, -3:, -3:, -1] A_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : int ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.dummy_cond_unet A_ = PNDMScheduler(skip_prk_steps=_lowercase ) A_ = self.dummy_vae A_ = self.dummy_text_encoder A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk A_ = StableDiffusionPipeline( unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , ) A_ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) A_ = "A painting of a squirrel eating a burger" A_ = torch.Generator(device=_lowercase ).manual_seed(0 ) A_ = sd_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) A_ = output.images A_ = torch.Generator(device=_lowercase ).manual_seed(0 ) A_ = sd_pipe( [prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_lowercase , )[0] A_ = image[0, -3:, -3:, -1] A_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : str ): A_ = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(pipe.scheduler , _lowercase ) assert pipe.safety_checker is None A_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowercase ) A_ = StableDiffusionPipeline.from_pretrained(_lowercase ) # sanity check that the pipeline still works assert pipe.safety_checker is None A_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def __A ( self : Optional[Any] ): A_ = self.dummy_cond_unet A_ = PNDMScheduler(skip_prk_steps=_lowercase ) A_ = self.dummy_vae A_ = self.dummy_text_encoder A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 A_ = unet.half() A_ = vae.half() A_ = bert.half() # make sure here that pndm scheduler skips prk A_ = StableDiffusionPipeline( unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , ) A_ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) A_ = "A painting of a squirrel eating a burger" A_ = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : Any ): A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_lowercase ) A_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) A_ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) A_ = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) A_ = 4003660346 A_ = 7 # without safety guidance (sld_guidance_scale = 0) A_ = torch.manual_seed(_lowercase ) A_ = sd_pipe( [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) A_ = torch.manual_seed(_lowercase ) A_ = sd_pipe( [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : List[str] ): A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_lowercase ) A_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) A_ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) A_ = "padme amidala taking a bath artwork, safe for work, no nudity" A_ = 2734971755 A_ = 7 A_ = torch.manual_seed(_lowercase ) A_ = sd_pipe( [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 A_ = torch.manual_seed(_lowercase ) A_ = sd_pipe( [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : Tuple ): A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) A_ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) A_ = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." " leyendecker" ) A_ = 1044355234 A_ = 12 A_ = torch.manual_seed(_lowercase ) A_ = sd_pipe( [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 A_ = torch.manual_seed(_lowercase ) A_ = sd_pipe( [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
312
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowercase__ ( snake_case_ :ndarray ): return np.dot(snake_case_ , snake_case_ ) class _UpperCAmelCase : def __init__( self : Union[str, Any] , *, _lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ): __UpperCAmelCase = regularization __UpperCAmelCase = gamma if kernel == "linear": __UpperCAmelCase = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('''gamma must be float or int''' ) if not self.gamma > 0: raise ValueError('''gamma must be > 0''' ) __UpperCAmelCase = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: __UpperCAmelCase = F'''Unknown kernel: {kernel}''' raise ValueError(_lowercase ) def a ( self : Dict , _lowercase : ndarray , _lowercase : ndarray ): return np.dot(_lowercase , _lowercase ) def a ( self : Any , _lowercase : ndarray , _lowercase : ndarray ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def a ( self : Union[str, Any] , _lowercase : list[ndarray] , _lowercase : ndarray ): __UpperCAmelCase = observations __UpperCAmelCase = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((__UpperCAmelCase) , ) = np.shape(_lowercase ) def to_minimize(_lowercase : ndarray ) -> float: __UpperCAmelCase = 0 ((__UpperCAmelCase) , ) = np.shape(_lowercase ) for i in range(_lowercase ): for j in range(_lowercase ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(_lowercase ) __UpperCAmelCase = LinearConstraint(_lowercase , 0 , 0 ) __UpperCAmelCase = Bounds(0 , self.regularization ) __UpperCAmelCase = minimize( _lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x __UpperCAmelCase = l_star # calculating mean offset of separation plane to points __UpperCAmelCase = 0 for i in range(_lowercase ): for j in range(_lowercase ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) __UpperCAmelCase = s / n def a ( self : List[Any] , _lowercase : ndarray ): __UpperCAmelCase = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , _lowercase ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
332
0
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class UpperCAmelCase_ : """simple docstring""" lowercase = 42 lowercase = None lowercase = None def __snake_case( ) -> List[str]: snake_case__ : Optional[int] = Node(1 ) snake_case__ : Any = Node(2 ) snake_case__ : Union[str, Any] = Node(3 ) snake_case__ : Tuple = Node(4 ) snake_case__ : str = Node(5 ) return tree def __snake_case( _lowerCAmelCase ) -> Tuple: return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def __snake_case( _lowerCAmelCase ) -> List[Any]: return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def __snake_case( _lowerCAmelCase ) -> Optional[Any]: return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def __snake_case( _lowerCAmelCase ) -> Optional[Any]: return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def __snake_case( _lowerCAmelCase ) -> Tuple: snake_case__ : Optional[Any] = [] if root is None: return output snake_case__ : int = deque([root] ) while process_queue: snake_case__ : Optional[Any] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: snake_case__ : Optional[int] = [] def populate_output(_lowerCAmelCase , _lowerCAmelCase ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: snake_case__ : List[str] = [] def populate_output(_lowerCAmelCase , _lowerCAmelCase ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def __snake_case( _lowerCAmelCase ) -> List[Any]: if root is None: return [] snake_case__ : List[str] = [] snake_case__ : Any = 0 snake_case__ : Dict = height(snake_case_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case_ , snake_case_ ) ) snake_case__ : Optional[int] = 1 else: output.append(get_nodes_from_right_to_left(snake_case_ , snake_case_ ) ) snake_case__ : Union[str, Any] = 0 return output def __snake_case( ) -> List[str]: # Main function for testing. snake_case__ : Optional[Any] = make_tree() print(f"In-order Traversal: {inorder(snake_case_ )}" ) print(f"Pre-order Traversal: {preorder(snake_case_ )}" ) print(f"Post-order Traversal: {postorder(snake_case_ )}" , """\n""" ) print(f"Height of Tree: {height(snake_case_ )}" , """\n""" ) print("""Complete Level Order Traversal: """ ) print(level_order(snake_case_ ) , """\n""" ) print("""Level-wise order Traversal: """ ) for level in range(1 , height(snake_case_ ) + 1 ): print(f"Level {level}:" , get_nodes_from_left_to_right(snake_case_ , level=snake_case_ ) ) print("""\nZigZag order Traversal: """ ) print(zigzag(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
35
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowercase : int = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
332
0
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowerCamelCase__ ( _lowerCAmelCase): '''simple docstring''' def __init__( self :int , a :TransformeraDModel , a :AutoencoderKL , a :KarrasDiffusionSchedulers , a :Optional[Dict[int, str]] = None , ) -> str: super().__init__() self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase ) # create a imagenet -> id dictionary for easier use __UpperCamelCase : Any = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split("," ): __UpperCamelCase : Optional[int] = int(_lowercase ) __UpperCamelCase : Dict = dict(sorted(self.labels.items() ) ) def _lowerCamelCase ( self :str , a :Union[str, List[str]] ) -> Optional[Any]: if not isinstance(_lowercase , _lowercase ): __UpperCamelCase : Optional[int] = list(_lowercase ) for l in label: if l not in self.labels: raise ValueError( f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self :List[str] , a :List[int] , a :float = 4.0 , a :Optional[Union[torch.Generator, List[torch.Generator]]] = None , a :int = 5_0 , a :Optional[str] = "pil" , a :bool = True , ) -> str: __UpperCamelCase : Dict = len(_lowercase ) __UpperCamelCase : List[str] = self.transformer.config.sample_size __UpperCamelCase : List[str] = self.transformer.config.in_channels __UpperCamelCase : Union[str, Any] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , ) __UpperCamelCase : List[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents __UpperCamelCase : Tuple = torch.tensor(_lowercase , device=self.device ).reshape(-1 ) __UpperCamelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device ) __UpperCamelCase : Tuple = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(_lowercase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: __UpperCamelCase : Dict = latent_model_input[: len(_lowercase ) // 2] __UpperCamelCase : Optional[Any] = torch.cat([half, half] , dim=0 ) __UpperCamelCase : str = self.scheduler.scale_model_input(_lowercase , _lowercase ) __UpperCamelCase : Any = t if not torch.is_tensor(_lowercase ): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) __UpperCamelCase : Optional[int] = latent_model_input.device.type == "mps" if isinstance(_lowercase , _lowercase ): __UpperCamelCase : List[Any] = torch.floataa if is_mps else torch.floataa else: __UpperCamelCase : Dict = torch.intaa if is_mps else torch.intaa __UpperCamelCase : Tuple = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: __UpperCamelCase : List[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __UpperCamelCase : Tuple = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output __UpperCamelCase : Optional[Any] = self.transformer( _lowercase , timestep=_lowercase , class_labels=_lowercase ).sample # perform guidance if guidance_scale > 1: __UpperCamelCase , __UpperCamelCase : str = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] __UpperCamelCase , __UpperCamelCase : Dict = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 ) __UpperCamelCase : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps) __UpperCamelCase : List[str] = torch.cat([half_eps, half_eps] , dim=0 ) __UpperCamelCase : int = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: __UpperCamelCase , __UpperCamelCase : List[Any] = torch.split(_lowercase , _lowercase , dim=1 ) else: __UpperCamelCase : List[Any] = noise_pred # compute previous image: x_t -> x_t-1 __UpperCamelCase : Optional[Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample if guidance_scale > 1: __UpperCamelCase , __UpperCamelCase : List[str] = latent_model_input.chunk(2 , dim=0 ) else: __UpperCamelCase : Any = latent_model_input __UpperCamelCase : Any = 1 / self.vae.config.scaling_factor * latents __UpperCamelCase : Tuple = self.vae.decode(_lowercase ).sample __UpperCamelCase : str = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __UpperCamelCase : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __UpperCamelCase : List[str] = self.numpy_to_pil(_lowercase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=_lowercase )
232
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self : Tuple , _lowercase : str , _lowercase : str ): __UpperCAmelCase , __UpperCAmelCase = text, pattern __UpperCAmelCase , __UpperCAmelCase = len(_lowercase ), len(_lowercase ) def a ( self : Optional[int] , _lowercase : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def a ( self : int , _lowercase : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def a ( self : Optional[Any] ): # searches pattern in text and returns index positions __UpperCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): __UpperCAmelCase = self.mismatch_in_text(_lowercase ) if mismatch_index == -1: positions.append(_lowercase ) else: __UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) __UpperCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _lowercase : str = 'ABAABA' _lowercase : Tuple = 'AB' _lowercase : Dict = BoyerMooreSearch(text, pattern) _lowercase : Any = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
332
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _UpperCamelCase = logging.get_logger(__name__) @dataclass class __lowercase (_lowerCAmelCase ): _UpperCamelCase = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self , **A_ ) ->Dict: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __lowerCAmelCase : List[Any] = deprecated_arg[3:] __lowerCAmelCase : List[str] = not kwargs.pop(_lowercase ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) __lowerCAmelCase : int = kwargs.pop('''tpu_name''' , self.tpu_name ) __lowerCAmelCase : Optional[Any] = kwargs.pop('''device_idx''' , self.device_idx ) __lowerCAmelCase : Tuple = kwargs.pop('''eager_mode''' , self.eager_mode ) __lowerCAmelCase : List[Any] = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**_lowercase ) _UpperCamelCase = field( default=_lowerCAmelCase , metadata={"""help""": """Name of TPU"""} , ) _UpperCamelCase = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) _UpperCamelCase = field(default=_lowerCAmelCase , metadata={"""help""": """Benchmark models in eager model."""} ) _UpperCamelCase = field( default=_lowerCAmelCase , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' requires_backends(self , ['''tf'''] ) __lowerCAmelCase : Any = None if self.tpu: try: if self.tpu_name: __lowerCAmelCase : Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __lowerCAmelCase : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __lowerCAmelCase : Tuple = None return tpu @cached_property def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __lowerCAmelCase : Optional[int] = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) __lowerCAmelCase : List[str] = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU __lowerCAmelCase : Tuple = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def UpperCamelCase__ ( self ) ->str: '''simple docstring''' requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' return self.n_gpu > 0
275
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _UpperCAmelCase : a__ : int a__ : Node | None = None a__ : Node | None = None def lowercase__ ( ): __UpperCAmelCase = Node(1 ) __UpperCAmelCase = Node(2 ) __UpperCAmelCase = Node(3 ) __UpperCAmelCase = Node(4 ) __UpperCAmelCase = Node(5 ) return tree def lowercase__ ( snake_case_ :Node | None ): return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowercase__ ( snake_case_ :Node | None ): return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowercase__ ( snake_case_ :Node | None ): return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowercase__ ( snake_case_ :Node | None ): __UpperCAmelCase = [] if root is None: return output __UpperCAmelCase = deque([root] ) while process_queue: __UpperCAmelCase = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ): __UpperCAmelCase = [] def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(snake_case_ , snake_case_ ) return output def lowercase__ ( snake_case_ :Node | None ): if root is None: return [] __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = height(snake_case_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 1 else: output.append(get_nodes_from_right_to_left(snake_case_ , snake_case_ ) ) __UpperCAmelCase = 0 return output def lowercase__ ( ): # Main function for testing. __UpperCAmelCase = make_tree() print(F'''In-order Traversal: {inorder(snake_case_ )}''' ) print(F'''Pre-order Traversal: {preorder(snake_case_ )}''' ) print(F'''Post-order Traversal: {postorder(snake_case_ )}''' , '''\n''' ) print(F'''Height of Tree: {height(snake_case_ )}''' , '''\n''' ) print('''Complete Level Order Traversal: ''' ) print(level_order(snake_case_ ) , '''\n''' ) print('''Level-wise order Traversal: ''' ) for level in range(1 , height(snake_case_ ) + 1 ): print(F'''Level {level}:''' , get_nodes_from_left_to_right(snake_case_ , level=snake_case_ ) ) print('''\nZigZag order Traversal: ''' ) print(zigzag(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
332
0
'''simple docstring''' from __future__ import annotations def _a( UpperCamelCase__ : list[float], UpperCamelCase__ : list[float] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =sorted(numsa + numsa ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =divmod(len(snake_case_ ), 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() a_ = [float(x) for x in input('Enter the elements of first array: ').split()] a_ = [float(x) for x in input('Enter the elements of second array: ').split()] print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
152
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class _UpperCAmelCase ( unittest.TestCase ): @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) ) @slow def a ( self : str ): __UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) __UpperCAmelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house __UpperCAmelCase = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim __UpperCAmelCase = torch.tensor( [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCAmelCase = model(_lowercase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) )
332
0
"""simple docstring""" def _A (__a , __a ) -> Any: """simple docstring""" _enforce_args(snake_case_ , snake_case_ ) if n == 0: return 0 SCREAMING_SNAKE_CASE_ : List[Any] = float('''-inf''' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE_ : Dict = max( snake_case_ , prices[i - 1] + naive_cut_rod_recursive(n - i , snake_case_ ) ) return max_revue def _A (__a , __a ) -> Tuple: """simple docstring""" _enforce_args(snake_case_ , snake_case_ ) SCREAMING_SNAKE_CASE_ : List[Any] = [float('''-inf''' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(snake_case_ , snake_case_ , snake_case_ ) def _A (__a , __a , __a ) -> Any: """simple docstring""" if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: SCREAMING_SNAKE_CASE_ : int = float('''-inf''' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE_ : Optional[Any] = max( snake_case_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , snake_case_ , snake_case_ ) , ) SCREAMING_SNAKE_CASE_ : Any = max_revenue return max_rev[n] def _A (__a , __a ) -> Tuple: """simple docstring""" _enforce_args(snake_case_ , snake_case_ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. SCREAMING_SNAKE_CASE_ : Tuple = [float('''-inf''' ) for _ in range(n + 1 )] SCREAMING_SNAKE_CASE_ : Any = 0 for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE_ : Dict = max_rev[i] for j in range(1 , i + 1 ): SCREAMING_SNAKE_CASE_ : Optional[Any] = max(snake_case_ , prices[j - 1] + max_rev[i - j] ) SCREAMING_SNAKE_CASE_ : str = max_revenue_i return max_rev[n] def _A (__a , __a ) -> int: """simple docstring""" if n < 0: SCREAMING_SNAKE_CASE_ : Dict = f'n must be greater than or equal to 0. Got n = {n}' raise ValueError(snake_case_ ) if n > len(snake_case_ ): SCREAMING_SNAKE_CASE_ : Tuple = ( '''Each integral piece of rod must have a corresponding price. ''' f'Got n = {n} but length of prices = {len(snake_case_ )}' ) raise ValueError(snake_case_ ) def _A () -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = [6, 10, 12, 15, 20, 23] SCREAMING_SNAKE_CASE_ : Tuple = len(snake_case_ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. SCREAMING_SNAKE_CASE_ : Any = 36 SCREAMING_SNAKE_CASE_ : List[Any] = top_down_cut_rod(snake_case_ , snake_case_ ) SCREAMING_SNAKE_CASE_ : Any = bottom_up_cut_rod(snake_case_ , snake_case_ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = naive_cut_rod_recursive(snake_case_ , snake_case_ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
91
"""simple docstring""" def lowercase__ ( snake_case_ :Union[str, Any] ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = max(snake_case_ ) __UpperCAmelCase = min(snake_case_ ) # create the counting array __UpperCAmelCase = coll_max + 1 - coll_min __UpperCAmelCase = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , snake_case_ ): __UpperCAmelCase = counting_arr[i] + counting_arr[i - 1] # create the output collection __UpperCAmelCase = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , snake_case_ ) ): __UpperCAmelCase = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowercase__ ( snake_case_ :str ): return "".join([chr(snake_case_ ) for i in counting_sort([ord(snake_case_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt" _lowercase : int = input('Enter numbers separated by a comma:\n').strip() _lowercase : int = [int(item) for item in user_input.split(',')] print(counting_sort(unsorted))
332
0