Dataset schema (column, dtype, observed min/max):

| column | dtype | min | max |
| --- | --- | --- | --- |
| code | string (lengths) | 87 | 55.2k |
| code_codestyle | int64 | 0 | 349 |
| style_context | string (lengths) | 135 | 49.1k |
| style_context_codestyle | int64 | 0 | 349 |
| label | int64 | 0 | 1 |
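For orientation, a minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library; the repository id below is a placeholder, since the dump does not name the dataset.

```python
# Minimal inspection sketch, assuming rows follow the schema above.
# "user/code-style-pairs" is a PLACEHOLDER id -- the dump does not name the dataset.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical path
print(ds.features)  # expect: code, code_codestyle, style_context, style_context_codestyle, label

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # code samples are stored as flat strings
```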
Row 1

code (code_codestyle: 321):

```python
'''simple docstring'''
def lowercase__ ( __UpperCamelCase )-> int:
    UpperCamelCase = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    UpperCamelCase = hex_num[0] == """-"""
    if is_negative:
        UpperCamelCase = hex_num[1:]
    try:
        UpperCamelCase = int(__UpperCamelCase , 16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    UpperCamelCase = """"""
    while int_num > 0:
        UpperCamelCase = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
import datasets

from .evaluate import evaluate


SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n    journal={arXiv preprint arXiv:2103.06268},\n    year={2021}\n}\n'

SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'

SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the CUAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\n    \'aupr\': Area Under the Precision-Recall curve\n    \'prec_at_80_recall\': Precision at 80% recall\n    \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> cuad_metric = datasets.load_metric("cuad")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):

    def A__ ( self ) -> Tuple:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    """predictions""": {
                        """id""": datasets.Value("""string""" ),
                        """prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
                    },
                    """references""": {
                        """id""": datasets.Value("""string""" ),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string""" ),
                                """answer_start""": datasets.Value("""int32""" ),
                            } ),
                    },
                } ) ,
            codebase_urls=["""https://www.atticusprojectai.org/cuad"""] ,
            reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        UpperCamelCase = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE )
        return score
```

label: 1
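A pattern worth noting before the remaining rows: in every sample, assignment targets are masked (`UpperCamelCase`, `SCREAMING_SNAKE_CASE__`, `lowercase__`, `A__`) while read sites keep the original identifiers, so the stored strings do not run as-is. The masked names can usually be inferred back from their read sites. A reconstructed, runnable version of the `code` sample of Row 1; the restored names are inferences, not ground truth:

```python
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary digits, returned as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"  # remember the sign, strip it for parsing
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str  # peel off bits, least significant first
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)


assert hex_to_bin("AC") == 10101100  # 0xAC == 172 == 0b10101100
```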
Row 2

code (code_codestyle: 321):

```python
'''simple docstring'''
from collections import namedtuple

import requests
from lxml import html  # type: ignore


SCREAMING_SNAKE_CASE__ = namedtuple('covid_data', 'cases deaths recovered')


def lowercase__ ( __UpperCamelCase = "https://www.worldometers.info/coronavirus/" )-> covid_data:
    UpperCamelCase = """//div[@class = \"maincounter-number\"]/span/text()"""
    return covid_data(*html.fromstring(requests.get(__UpperCamelCase ).content ).xpath(__UpperCamelCase ) )


SCREAMING_SNAKE_CASE__ = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
```

style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
def lowercase__ ( __UpperCamelCase )-> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    UpperCamelCase = 1
    UpperCamelCase = 1
    while repunit:
        UpperCamelCase = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def lowercase__ ( __UpperCamelCase = 1000000 )-> int:
    UpperCamelCase = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(__UpperCamelCase ) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f'{solution() = }')
```

label: 1
Row 3

code (code_codestyle: 321):

```python
'''simple docstring'''
import math
from datetime import datetime, timedelta


def lowercase__ ( __UpperCamelCase )-> datetime:
    UpperCamelCase = year % 19
    UpperCamelCase = year % 4
    UpperCamelCase = year % 7
    UpperCamelCase = math.floor(year / 100 )
    UpperCamelCase = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    UpperCamelCase = leap_day_inhibits / 4
    UpperCamelCase = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    UpperCamelCase = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    UpperCamelCase = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    UpperCamelCase = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(__UpperCamelCase , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(__UpperCamelCase , 4 , 18 )
    else:
        return datetime(__UpperCamelCase , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


if __name__ == "__main__":
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        SCREAMING_SNAKE_CASE__ = 'will be' if year > datetime.now().year else 'was'
        print(f'Easter in {year} {tense} {gauss_easter(year)}')
```

style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
from __future__ import annotations

from math import pow, sqrt


def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance == 0:
        return {"resistance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(__UpperCamelCase , 2 ) + pow(__UpperCamelCase , 2 ) )}
    else:
        raise ValueError("""Exactly one argument must be 0""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

label: 1
Row 4

code (code_codestyle: 321):

```python
'''simple docstring'''
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

SCREAMING_SNAKE_CASE__ = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('importlib_metadata')

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')


def lowercase__ ( __UpperCamelCase , __UpperCamelCase=None )-> Union[str, Any]:
    require_version(deps[pkg] , __UpperCamelCase )
```

style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
# Algorithm for the pigeonhole sorting


def lowercase__ ( __UpperCamelCase )-> Union[str, Any]:
    UpperCamelCase = min(__UpperCamelCase )  # min() finds the minimum value
    UpperCamelCase = max(__UpperCamelCase )  # max() finds the maximum value
    UpperCamelCase = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    UpperCamelCase = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(__UpperCamelCase , __UpperCamelCase ), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    UpperCamelCase = 0
    for count in range(__UpperCamelCase ):
        while holes[count] > 0:
            holes[count] -= 1
            UpperCamelCase = count + min_val
            i += 1


def lowercase__ ( )-> Any:
    UpperCamelCase = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(__UpperCamelCase )
    print("""Sorted order is:""" , """ """.join(__UpperCamelCase ) )


if __name__ == "__main__":
    main()
```

label: 1
Row 5

code (code_codestyle: 321):

```python
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTVaConfig,
    MobileViTVaForImageClassification,
    MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)


def lowercase__ ( __UpperCamelCase )-> Union[str, Any]:
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(__UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase="." ):
        UpperCamelCase = []
        for k, v in d.items():
            UpperCamelCase = parent_key + sep + k if parent_key else k
            if isinstance(__UpperCamelCase , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(__UpperCamelCase , __UpperCamelCase , sep=__UpperCamelCase ).items() )
            else:
                items.append((new_key, v) )
        return dict(__UpperCamelCase )

    UpperCamelCase = argparse.Namespace()
    with open(__UpperCamelCase , """r""" ) as yaml_file:
        try:
            UpperCamelCase = yaml.load(__UpperCamelCase , Loader=yaml.FullLoader )
            UpperCamelCase = flatten_yaml_as_dict(__UpperCamelCase )
            for k, v in flat_cfg.items():
                setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(__UpperCamelCase , str(__UpperCamelCase ) ) )
    return config


def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> str:
    UpperCamelCase = MobileViTVaConfig()

    UpperCamelCase = False

    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        UpperCamelCase = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            UpperCamelCase = 384
        else:
            UpperCamelCase = 256
        UpperCamelCase = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        UpperCamelCase = 21000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            UpperCamelCase = 384
        else:
            UpperCamelCase = 256
        UpperCamelCase = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        UpperCamelCase = 151
        UpperCamelCase = 512
        UpperCamelCase = """ade20k-id2label.json"""
        UpperCamelCase = True
    elif task_name.startswith("""voc_""" ):
        UpperCamelCase = 21
        UpperCamelCase = 512
        UpperCamelCase = """pascal-voc-id2label.json"""
        UpperCamelCase = True

    # orig_config
    UpperCamelCase = load_orig_config_file(__UpperCamelCase )
    assert getattr(__UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    UpperCamelCase = getattr(__UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(__UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    UpperCamelCase = getattr(__UpperCamelCase , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        UpperCamelCase = getattr(__UpperCamelCase , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            UpperCamelCase = getattr(__UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            UpperCamelCase = getattr(__UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
            UpperCamelCase = getattr(__UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )

    # id2label
    UpperCamelCase = """huggingface/label-files"""
    UpperCamelCase = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
    UpperCamelCase = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
    UpperCamelCase = idalabel
    UpperCamelCase = {v: k for k, v in idalabel.items()}
    return config


def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
    UpperCamelCase = dct.pop(__UpperCamelCase )
    UpperCamelCase = val


def lowercase__ ( __UpperCamelCase , __UpperCamelCase=False )-> Optional[int]:
    if base_model:
        UpperCamelCase = """"""
    else:
        UpperCamelCase = """mobilevitv2."""

    UpperCamelCase = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            UpperCamelCase = k[8:]
        else:
            UpperCamelCase = k

        if ".block." in k:
            UpperCamelCase = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            UpperCamelCase = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            UpperCamelCase = k_new.replace(""".norm.""" , """.normalization.""" )

        if "conv_1." in k:
            UpperCamelCase = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if F"layer_{i}." in k:
                UpperCamelCase = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            UpperCamelCase = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            UpperCamelCase = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )

        for i in [3, 4, 5]:
            if F"layer_{i}.0." in k:
                UpperCamelCase = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if F"layer_{i}.1.local_rep.0." in k:
                UpperCamelCase = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if F"layer_{i}.1.local_rep.1." in k:
                UpperCamelCase = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )

        for i in [3, 4, 5]:
            if i == 3:
                UpperCamelCase = [0, 1]
            elif i == 4:
                UpperCamelCase = [0, 1, 2, 3]
            elif i == 5:
                UpperCamelCase = [0, 1, 2]

            for j in j_in:
                if F"layer_{i}.1.global_rep.{j}." in k:
                    UpperCamelCase = k_new.replace(
                        F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
            if F"layer_{i}.1.global_rep.{j+1}." in k:
                UpperCamelCase = k_new.replace(
                    F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )

            if F"layer_{i}.1.conv_proj." in k:
                UpperCamelCase = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )

        if "pre_norm_attn.0." in k:
            UpperCamelCase = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            UpperCamelCase = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            UpperCamelCase = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            UpperCamelCase = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            UpperCamelCase = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )

        if "classifier.1." in k:
            UpperCamelCase = k_new.replace("""classifier.1.""" , """classifier.""" )

        if "seg_head." in k:
            UpperCamelCase = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            UpperCamelCase = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            UpperCamelCase = k_new.replace(""".aspp_pool.""" , """.""" )

        rename_keys.append((k, k_new) )
    return rename_keys


def lowercase__ ( __UpperCamelCase )-> Union[str, Any]:
    UpperCamelCase = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(__UpperCamelCase )
    for k in keys_to_ignore:
        state_dict.pop(__UpperCamelCase , __UpperCamelCase )


def lowercase__ ( )-> Tuple:
    UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    UpperCamelCase = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
    return im


@torch.no_grad()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
    UpperCamelCase = get_mobilevitva_config(__UpperCamelCase , __UpperCamelCase )

    # load original state_dict
    UpperCamelCase = torch.load(__UpperCamelCase , map_location="""cpu""" )

    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        UpperCamelCase = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval()
        UpperCamelCase = False
    else:
        UpperCamelCase = MobileViTVaForImageClassification(__UpperCamelCase ).eval()
        UpperCamelCase = False

    # remove and rename some keys of load the original model
    UpperCamelCase = checkpoint
    remove_unused_keys(__UpperCamelCase )
    UpperCamelCase = create_rename_keys(__UpperCamelCase , base_model=__UpperCamelCase )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

    # load modified state_dict
    model.load_state_dict(__UpperCamelCase )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    UpperCamelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    UpperCamelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
    UpperCamelCase = model(**__UpperCamelCase )

    # verify classification model
    if task_name.startswith("""imagenet""" ):
        UpperCamelCase = outputs.logits
        UpperCamelCase = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            UpperCamelCase = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
            assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 )

    Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
    print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(__UpperCamelCase )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__UpperCamelCase )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task',
        default='imagenet1k_256',
        type=str,
        help=(
            'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
            '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
        ),
        choices=[
            'imagenet1k_256',
            'imagenet1k_384',
            'imagenet21k_to_1k_256',
            'imagenet21k_to_1k_384',
            'ade20k_deeplabv3',
            'voc_deeplabv3',
        ],
    )
    parser.add_argument(
        '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
    )
    parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )

    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
```

style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class a_ ( lowerCamelCase ):
    lowercase = (DDPMParallelScheduler,)

    def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0_0_0_1,
            """beta_end""": 0.0_2,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**_SCREAMING_SNAKE_CASE )
        return config

    def A__ ( self ) -> List[str]:
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Optional[int]:
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Optional[Any]:
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Tuple:
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> List[Any]:
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> str:
        """simple docstring"""
        self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )

    def A__ ( self ) -> Optional[Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Union[str, Any]:
        """simple docstring"""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> int:
        """simple docstring"""
        UpperCamelCase = self.scheduler_classes[0]
        UpperCamelCase = self.get_scheduler_config()
        UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5

    def A__ ( self ) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = self.scheduler_classes[0]
        UpperCamelCase = self.get_scheduler_config()
        UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )

        UpperCamelCase = self.dummy_model()
        UpperCamelCase = self.dummy_sample_deter
        UpperCamelCase = self.dummy_sample_deter + 0.1
        UpperCamelCase = self.dummy_sample_deter - 0.1

        UpperCamelCase = samplea.shape[0]
        UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
        UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE )

        UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )

        UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
        UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
        assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3

    def A__ ( self ) -> List[str]:
        """simple docstring"""
        UpperCamelCase = self.scheduler_classes[0]
        UpperCamelCase = self.get_scheduler_config()
        UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )

        UpperCamelCase = self.dummy_model()
        UpperCamelCase = self.dummy_sample_deter
        UpperCamelCase = torch.manual_seed(0 )

        for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

            # 2. predict previous mean of sample x_t-1
            UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample

            UpperCamelCase = pred_prev_sample

        UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
        UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3

    def A__ ( self ) -> Tuple:
        """simple docstring"""
        UpperCamelCase = self.scheduler_classes[0]
        UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
        UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )

        UpperCamelCase = self.dummy_model()
        UpperCamelCase = self.dummy_sample_deter
        UpperCamelCase = torch.manual_seed(0 )

        for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

            # 2. predict previous mean of sample x_t-1
            UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample

            UpperCamelCase = pred_prev_sample

        UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
        UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3

    def A__ ( self ) -> Any:
        """simple docstring"""
        UpperCamelCase = self.scheduler_classes[0]
        UpperCamelCase = self.get_scheduler_config()
        UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )

        UpperCamelCase = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )

        UpperCamelCase = scheduler.timesteps

        for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
            if i == len(_SCREAMING_SNAKE_CASE ) - 1:
                UpperCamelCase = -1
            else:
                UpperCamelCase = timesteps[i + 1]

            UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
            UpperCamelCase = prev_t.item()

            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Optional[Any]:
        """simple docstring"""
        UpperCamelCase = self.scheduler_classes[0]
        UpperCamelCase = self.get_scheduler_config()
        UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )

        UpperCamelCase = [100, 87, 50, 51, 0]

        with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Optional[Any]:
        """simple docstring"""
        UpperCamelCase = self.scheduler_classes[0]
        UpperCamelCase = self.get_scheduler_config()
        UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )

        UpperCamelCase = [100, 87, 50, 1, 0]
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )

        with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Any:
        """simple docstring"""
        UpperCamelCase = self.scheduler_classes[0]
        UpperCamelCase = self.get_scheduler_config()
        UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )

        UpperCamelCase = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            _SCREAMING_SNAKE_CASE ,
            msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
```

label: 1
Row 6

code (code_codestyle: 321):

```python
'''simple docstring'''
import unittest

from knapsack import knapsack as k


class a_ ( unittest.TestCase ):

    def A__ ( self ) -> Any:
        """simple docstring"""
        UpperCamelCase = 0
        UpperCamelCase = [0]
        UpperCamelCase = [0]
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
        self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 0 )

        UpperCamelCase = [60]
        UpperCamelCase = [10]
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
        self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 0 )

    def A__ ( self ) -> Optional[Any]:
        """simple docstring"""
        UpperCamelCase = 3
        UpperCamelCase = [1, 2, 3]
        UpperCamelCase = [3, 2, 1]
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
        self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 5 )

    def A__ ( self ) -> int:
        """simple docstring"""
        UpperCamelCase = 50
        UpperCamelCase = [60, 100, 120]
        UpperCamelCase = [10, 20, 30]
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
        self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 220 )


if __name__ == "__main__":
    unittest.main()
```

style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
from __future__ import annotations

import math


class a_ :

    def __init__( self , _SCREAMING_SNAKE_CASE ) -> None:
        """simple docstring"""
        UpperCamelCase = size
        # approximate the overall size of segment tree with given value
        UpperCamelCase = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        UpperCamelCase = [0 for i in range(0 , 4 * size )]
        UpperCamelCase = [0 for i in range(0 , 4 * size )]  # flag for lazy update

    def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        return idx * 2

    def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        return idx * 2 + 1

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
        """simple docstring"""
        if left_element == right_element:
            UpperCamelCase = a[left_element - 1]
        else:
            UpperCamelCase = (left_element + right_element) // 2
            self.build(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.build(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            UpperCamelCase = max(
                self.segment_tree[self.left(_SCREAMING_SNAKE_CASE )] , self.segment_tree[self.right(_SCREAMING_SNAKE_CASE )] )

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
        """simple docstring"""
        if self.flag[idx] is True:
            UpperCamelCase = self.lazy[idx]
            UpperCamelCase = False
            if left_element != right_element:
                UpperCamelCase = self.lazy[idx]
                UpperCamelCase = self.lazy[idx]
                UpperCamelCase = True
                UpperCamelCase = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            UpperCamelCase = val
            if left_element != right_element:
                UpperCamelCase = val
                UpperCamelCase = val
                UpperCamelCase = True
                UpperCamelCase = True
            return True
        UpperCamelCase = (left_element + right_element) // 2
        self.update(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        self.update(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = max(
            self.segment_tree[self.left(_SCREAMING_SNAKE_CASE )] , self.segment_tree[self.right(_SCREAMING_SNAKE_CASE )] )
        return True

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int | float:
        """simple docstring"""
        if self.flag[idx] is True:
            UpperCamelCase = self.lazy[idx]
            UpperCamelCase = False
            if left_element != right_element:
                UpperCamelCase = self.lazy[idx]
                UpperCamelCase = self.lazy[idx]
                UpperCamelCase = True
                UpperCamelCase = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        UpperCamelCase = (left_element + right_element) // 2
        UpperCamelCase = self.query(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.query(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def __str__( self ) -> str:
        """simple docstring"""
        return str([self.query(1 , 1 , self.size , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for i in range(1 , self.size + 1 )] )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
    SCREAMING_SNAKE_CASE__ = 1_5
    SCREAMING_SNAKE_CASE__ = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 1_1))
    print(segt.query(1, 1, size, 7, 1_2))
    segt.update(1, 1, size, 1, 3, 1_1_1)
    print(segt.query(1, 1, size, 1, 1_5))
    segt.update(1, 1, size, 7, 8, 2_3_5)
    print(segt)
```

label: 1
Row 7

code (code_codestyle: 321):

```python
'''simple docstring'''
from __future__ import annotations

from math import pow, sqrt


def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance == 0:
        return {"resistance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(__UpperCamelCase , 2 ) + pow(__UpperCamelCase , 2 ) )}
    else:
        raise ValueError("""Exactly one argument must be 0""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
def lowercase__ ( __UpperCamelCase = 1000 )-> int:
    UpperCamelCase = -1
    UpperCamelCase = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        UpperCamelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
        UpperCamelCase = n - a - b
        if c * c == (a * a + b * b):
            UpperCamelCase = a * b * c
        if candidate >= product:
            UpperCamelCase = candidate
    return product


if __name__ == "__main__":
    print(f'{solution() = }')
```

label: 1
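The same reconstruction technique applies to this row's `style_context`, a Project Euler "special Pythagorean triplet" search. A runnable sketch with inferred names (again, the names are editorial inferences from the read sites, not ground truth):

```python
def solution(n: int = 1000) -> int:
    """Largest product a*b*c with a + b + c == n and a**2 + b**2 == c**2."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # From a + b + c = n and a^2 + b^2 = c^2, eliminating c gives
        #   b = (n^2 - 2*a*n) / (2*n - 2*a)
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):  # guards against the floor in integer division
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product


assert solution(1000) == 31875000  # the classic 200-375-425 triplet
```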
Row 8

code (code_codestyle: 321):

```python
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)


def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False )-> Optional[int]:
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise

    if not is_sharded:
        UpperCamelCase = os.path.abspath(__UpperCamelCase )
        logger.info(F"Loading PyTorch weights from {pt_path}" )

        UpperCamelCase = torch.load(__UpperCamelCase , map_location="""cpu""" )
        logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )

        UpperCamelCase = convert_pytorch_state_dict_to_flax(__UpperCamelCase , __UpperCamelCase )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        UpperCamelCase = convert_pytorch_sharded_state_dict_to_flax(__UpperCamelCase , __UpperCamelCase )
    return flax_state_dict


def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> (Tuple[str], np.ndarray):
    def is_key_or_prefix_key_in_dict(__UpperCamelCase ) -> bool:
        return len(set(__UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0

    # layer norm
    UpperCamelCase = pt_tuple_key[:-1] + ("""scale""",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    UpperCamelCase = pt_tuple_key[:-1] + ("""mean""",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    UpperCamelCase = pt_tuple_key[:-1] + ("""var""",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    UpperCamelCase = pt_tuple_key[:-1] + ("""embedding""",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    UpperCamelCase = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__UpperCamelCase ):
        UpperCamelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    UpperCamelCase = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__UpperCamelCase ):
        UpperCamelCase = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    UpperCamelCase = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    UpperCamelCase = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    UpperCamelCase = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        UpperCamelCase = pt_tuple_key[-2] + """_g"""
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        UpperCamelCase = pt_tuple_key[-2] + """_v"""
    if name is not None:
        UpperCamelCase = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[str]:
    # convert pytorch tensor to numpy
    UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}

    UpperCamelCase = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        UpperCamelCase = flax_model.params["""params"""]
    else:
        UpperCamelCase = flax_model.params
    UpperCamelCase = flatten_dict(__UpperCamelCase )

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        UpperCamelCase = flatten_dict(flax_model.params["""batch_stats"""] )
        random_flax_state_dict.update(__UpperCamelCase )

    UpperCamelCase = {}

    UpperCamelCase = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    UpperCamelCase = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        UpperCamelCase = tuple(pt_key.split(""".""" ) )

        # remove base model prefix if necessary
        UpperCamelCase = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase = pt_tuple_key[1:]

        # Correctly rename weight parameters
        UpperCamelCase ,UpperCamelCase = rename_key_and_reshape_tensor(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

        # add model prefix if necessary
        UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                UpperCamelCase = jnp.asarray(__UpperCamelCase )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(__UpperCamelCase , __UpperCamelCase )
                continue

            # also add unexpected weight so that warning is thrown
            UpperCamelCase = jnp.asarray(__UpperCamelCase )
        else:
            # also add unexpected weight so that warning is thrown
            UpperCamelCase = jnp.asarray(__UpperCamelCase )

    return unflatten_dict(__UpperCamelCase )


def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
    import torch

    # Load the index
    UpperCamelCase = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        UpperCamelCase = torch.load(__UpperCamelCase )
        UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}

        UpperCamelCase = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            UpperCamelCase = flax_model.params["""params"""]

            UpperCamelCase = flatten_dict(__UpperCamelCase )
            random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
        else:
            UpperCamelCase = flax_model.params
            UpperCamelCase = flatten_dict(__UpperCamelCase )

        UpperCamelCase = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        UpperCamelCase = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            UpperCamelCase = tuple(pt_key.split(""".""" ) )

            # remove base model prefix if necessary
            UpperCamelCase = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                UpperCamelCase = pt_tuple_key[1:]

            # Correctly rename weight parameters
            UpperCamelCase ,UpperCamelCase = rename_key_and_reshape_tensor(
                __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
            # add model prefix if necessary
            UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                UpperCamelCase = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    UpperCamelCase = jnp.asarray(__UpperCamelCase )
                    continue
                if "var" in flax_key[-1]:
                    UpperCamelCase = jnp.asarray(__UpperCamelCase )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(__UpperCamelCase , __UpperCamelCase )
                    continue

                # also add unexpected weight so that warning is thrown
                UpperCamelCase = jnp.asarray(__UpperCamelCase )
            else:
                # also add unexpected weight so that warning is thrown
                UpperCamelCase = jnp.asarray(__UpperCamelCase )
    return unflatten_dict(__UpperCamelCase )


def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
    UpperCamelCase = os.path.abspath(__UpperCamelCase )
    logger.info(F"Loading Flax weights from {flax_checkpoint_path}" )

    # import correct flax class
    UpperCamelCase = getattr(__UpperCamelCase , """Flax""" + model.__class__.__name__ )

    # load flax weight dict
    with open(__UpperCamelCase , """rb""" ) as state_f:
        try:
            UpperCamelCase = from_bytes(__UpperCamelCase , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )

    return load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase )


def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise

    # check if we have bf16 weights
    UpperCamelCase = flatten_dict(jax.tree_util.tree_map(lambda __UpperCamelCase : x.dtype == jnp.bfloataa , __UpperCamelCase ) ).values()
    if any(__UpperCamelCase ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        UpperCamelCase = jax.tree_util.tree_map(
            lambda __UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __UpperCamelCase )

    UpperCamelCase = flatten_dict(__UpperCamelCase )
    UpperCamelCase = pt_model.state_dict()

    UpperCamelCase = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    UpperCamelCase = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    UpperCamelCase = []
    UpperCamelCase = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        UpperCamelCase = flax_key_tuple[0] == pt_model.base_model_prefix
        UpperCamelCase = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__UpperCamelCase ) not in pt_model_dict:
            # conv layer
            UpperCamelCase = flax_key_tuple[:-1] + ("""weight""",)
            UpperCamelCase = jnp.transpose(__UpperCamelCase , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCamelCase ) not in pt_model_dict:
            # linear layer
            UpperCamelCase = flax_key_tuple[:-1] + ("""weight""",)
            UpperCamelCase = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            UpperCamelCase = flax_key_tuple[:-1] + ("""weight""",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            UpperCamelCase = flax_key_tuple[:-1] + ("""running_mean""",)
        elif "var" in flax_key_tuple[-1]:
            UpperCamelCase = flax_key_tuple[:-1] + ("""running_var""",)

        if "batch_stats" in flax_state:
            UpperCamelCase = """.""".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            UpperCamelCase = """.""".join(__UpperCamelCase )

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        UpperCamelCase = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            UpperCamelCase = key.split(""".""" )
            UpperCamelCase = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                UpperCamelCase = key_components[-2] + """_g"""
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                UpperCamelCase = key_components[-2] + """_v"""
            if name is not None:
                UpperCamelCase = key_components[:-3] + [name]
                UpperCamelCase = """.""".join(__UpperCamelCase )
                UpperCamelCase = key

        if flax_key in special_pt_names:
            UpperCamelCase = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
            else:
                # add weight to pytorch dict
                UpperCamelCase = np.asarray(__UpperCamelCase ) if not isinstance(__UpperCamelCase , np.ndarray ) else flax_tensor
                UpperCamelCase = torch.from_numpy(__UpperCamelCase )
                # remove from missing keys
                missing_keys.remove(__UpperCamelCase )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(__UpperCamelCase )

    pt_model.load_state_dict(__UpperCamelCase )

    # re-transform missing_keys to list
    UpperCamelCase = list(__UpperCamelCase )

    if len(__UpperCamelCase ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    else:
        logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )

    if len(__UpperCamelCase ) > 0:
        logger.warning(
            F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            """ use it for predictions and inference.""" )
    else:
        logger.warning(
            F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            """If your task is similar to the task the model of the checkpoint was trained on, """
            F"you can already use {pt_model.__class__.__name__} for predictions without further training." )

    return pt_model
```

style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
import argparse
import struct
import unittest


class a_ :

    def __init__( self , _SCREAMING_SNAKE_CASE ) -> None:
        """simple docstring"""
        UpperCamelCase = data
        # Initialize hash values
        UpperCamelCase = [
            0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A,
            0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19,
        ]
        # Initialize round constants
        UpperCamelCase = [
            0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5,
            0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5,
            0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3,
            0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74,
            0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC,
            0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA,
            0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7,
            0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67,
            0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13,
            0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85,
            0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3,
            0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70,
            0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5,
            0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3,
            0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08,
            0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2,
        ]
        UpperCamelCase = self.preprocessing(self.data )
        self.final_hash()

    @staticmethod
    def A__ ( _SCREAMING_SNAKE_CASE ) -> bytes:
        """simple docstring"""
        UpperCamelCase = B"""\x80""" + (B"""\x00""" * (63 - (len(_SCREAMING_SNAKE_CASE ) + 8) % 64))
        UpperCamelCase = struct.pack(""">Q""" , (len(_SCREAMING_SNAKE_CASE ) * 8) )
        return data + padding + big_endian_integer

    def A__ ( self ) -> None:
        """simple docstring"""
        UpperCamelCase = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            UpperCamelCase = list(struct.unpack(""">16L""" , _SCREAMING_SNAKE_CASE ) )
            # add 48 0-ed integers
            words += [0] * 48

            UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = self.hashes

            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    UpperCamelCase = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    UpperCamelCase = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )

                    UpperCamelCase = (
                        words[index - 16] + sa + words[index - 7] + sa
                    ) % 0x1_00_00_00_00

                # Compression
                UpperCamelCase = self.ror(_SCREAMING_SNAKE_CASE , 6 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 11 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 25 )
                UpperCamelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                UpperCamelCase = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                UpperCamelCase = self.ror(_SCREAMING_SNAKE_CASE , 2 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 13 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 22 )
                UpperCamelCase = (a & b) ^ (a & c) ^ (b & c)
                UpperCamelCase = (sa + maj) % 0x1_00_00_00_00

                UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((tempa + tempa) % 0x1_00_00_00_00),
                )
            UpperCamelCase = [a, b, c, d, e, f, g, h]

            # Modify final values
            UpperCamelCase = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        UpperCamelCase = """""".join([hex(_SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for value in self.hashes] )

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)


class a_ ( unittest.TestCase ):

    def A__ ( self ) -> None:
        """simple docstring"""
        import hashlib

        UpperCamelCase = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(_SCREAMING_SNAKE_CASE ).hash , hashlib.shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() )


def lowercase__ ( )-> None:
    import doctest

    doctest.testmod()

    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" ,
        """--string""" ,
        dest="""input_string""" ,
        default="""Hello World!! Welcome to Cryptography""" ,
        help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    UpperCamelCase = parser.parse_args()
    UpperCamelCase = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            UpperCamelCase = f.read()
    else:
        UpperCamelCase = bytes(__UpperCamelCase , """utf-8""" )

    print(SHAaaa(__UpperCamelCase ).hash )


if __name__ == "__main__":
    main()
```

label: 1
Row 9

code (code_codestyle: 321):

```python
'''simple docstring'''
def lowercase__ ( __UpperCamelCase )-> int:
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )

    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    UpperCamelCase = grid[0]

    for row_n in range(1 , len(__UpperCamelCase ) ):
        UpperCamelCase = grid[row_n]
        UpperCamelCase = fill_row(__UpperCamelCase , __UpperCamelCase )
        UpperCamelCase = grid[row_n]

    return grid[-1][-1]


def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> list:
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(__UpperCamelCase ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
style_context (style_context_codestyle: 321):

```python
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder


# @@protoc_insertion_point(imports)

SCREAMING_SNAKE_CASE__ = _symbol_database.Default()

SCREAMING_SNAKE_CASE__ = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)

SCREAMING_SNAKE_CASE__ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    SCREAMING_SNAKE_CASE__ = None
    SCREAMING_SNAKE_CASE__ = b'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    SCREAMING_SNAKE_CASE__ = 4_5
    SCREAMING_SNAKE_CASE__ = 1_5_8_1
    SCREAMING_SNAKE_CASE__ = 1_5_1_7
    SCREAMING_SNAKE_CASE__ = 1_5_7_0
    SCREAMING_SNAKE_CASE__ = 1_5_8_4
    SCREAMING_SNAKE_CASE__ = 1_7_9_3
    SCREAMING_SNAKE_CASE__ = 1_7_9_5
    SCREAMING_SNAKE_CASE__ = 1_9_1_6
    SCREAMING_SNAKE_CASE__ = 1_8_6_4
    SCREAMING_SNAKE_CASE__ = 1_9_0_5
    SCREAMING_SNAKE_CASE__ = 1_9_1_9
    SCREAMING_SNAKE_CASE__ = 2_4_2_9
    SCREAMING_SNAKE_CASE__ = 2_2_0_8
    SCREAMING_SNAKE_CASE__ = 2_4_1_8
    SCREAMING_SNAKE_CASE__ = 2_3_2_3
    SCREAMING_SNAKE_CASE__ = 2_4_0_7

# @@protoc_insertion_point(module_scope)
```

label: 1
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or to PascalCase when
    ``use_pascal`` is True."""
    if not isinstance(input_str, str):
        raise ValueError(f"Expected string as input, found {type(input_str)}")
    if not isinstance(use_pascal, bool):
        raise ValueError(f"Expected boolean as use_pascal parameter, found {type(use_pascal)}")

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
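# Illustrative calls for snake_to_camel_case above (these examples are
# assumptions for demonstration, not taken from the original file):
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"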
321
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
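# Rough sanity check for the ideal-gas helpers above (this snippet and its
# numbers are an illustration, not part of the original file):
# 1 mol at 273.15 K in 0.0224 m^3 should come out near standard pressure.
p = pressure_of_gas_system(1.0, 273.15, 0.0224)
assert 1.00e5 < p < 1.03e5  # roughly 101 kPa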
321
1
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLNet model."""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
321
import importlib
import shutil
import threading
import warnings

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether ``fs`` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's cached event loop and thread references."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
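# Illustrative calls for extract_path_from_uri above (these inputs are
# assumptions for demonstration, not part of the original module):
assert extract_path_from_uri("s3://my-bucket/datasets/train") == "my-bucket/datasets/train"
assert extract_path_from_uri("relative/local/path") == "relative/local/path"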
321
1
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class a_ ( unittest.TestCase ): def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = tempfile.mkdtemp() UpperCamelCase = BlipImageProcessor() UpperCamelCase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) UpperCamelCase = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) UpperCamelCase = InstructBlipProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).tokenizer def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).image_processor def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).qformer_tokenizer def A__ ( self ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) UpperCamelCase = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 ) UpperCamelCase = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor.qformer_tokenizer , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Dict: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" ) UpperCamelCase = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A__ ( self ) -> Optional[Any]: """simple 
docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE ) UpperCamelCase = """lower newer""" UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE ) UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE ) UpperCamelCase = qformer_tokenizer(_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE ) UpperCamelCase = """lower newer""" UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(_SCREAMING_SNAKE_CASE ): processor() def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE ) UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase = processor.batch_decode(_SCREAMING_SNAKE_CASE ) UpperCamelCase = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE ) UpperCamelCase = """lower newer""" UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
321
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
321
1
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Remove duplicate entries from a table-of-contents section and sort it
    alphabetically by title, keeping any "Overview" entry first."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
321
1
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
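# Small worked example for solution above (illustrative, not from the file):
# Fibonacci terms up to 34 are 1, 1, 2, 3, 5, 8, 13, 21, 34; the even ones
# are 2, 8 and 34, so solution(34) should return 44.
assert solution(34) == 44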
321
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download ``num_class_images`` regularization images for ``class_prompt``
    from the LAION-400M KNN index into ``class_data_dir``."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until enough candidate images are returned.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fb, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fb.write(images["url"] + "\n")
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
321
1
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class a_ : lowercase = 42 # [batch_size x 3] lowercase = 42 # [batch_size x 3] lowercase = 42 # [batch_size x 3] lowercase = 42 # [batch_size x 3] lowercase = 42 lowercase = 42 lowercase = 42 lowercase = 42 lowercase = 42 def A__ ( self ) -> Union[str, Any]: """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def A__ ( self ) -> Optional[Any]: """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def A__ ( self ) -> List[Any]: """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def A__ ( self ) -> torch.Tensor: """simple docstring""" UpperCamelCase = torch.arange(self.height * self.width ) UpperCamelCase = torch.stack( [ pixel_indices % self.width, torch.div(_SCREAMING_SNAKE_CASE , self.width , rounding_mode="""trunc""" ), ] , axis=1 , ) return coords @property def A__ ( self ) -> Dict: """simple docstring""" UpperCamelCase ,*UpperCamelCase = self.shape UpperCamelCase = int(np.prod(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = self.get_image_coords() UpperCamelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) UpperCamelCase = self.get_camera_rays(_SCREAMING_SNAKE_CASE ) UpperCamelCase = rays.view(_SCREAMING_SNAKE_CASE , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def A__ ( self , _SCREAMING_SNAKE_CASE ) -> torch.Tensor: """simple docstring""" UpperCamelCase ,*UpperCamelCase ,UpperCamelCase = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] UpperCamelCase = coords.view(_SCREAMING_SNAKE_CASE , -1 , 2 ) UpperCamelCase = self.resolution() UpperCamelCase = self.fov() UpperCamelCase = (flat.float() / (res - 1)) * 2 - 1 UpperCamelCase = fracs * torch.tan(fov / 2 ) UpperCamelCase = fracs.view(_SCREAMING_SNAKE_CASE , -1 , 2 ) UpperCamelCase = ( self.z.view(_SCREAMING_SNAKE_CASE , 1 , 3 ) + self.x.view(_SCREAMING_SNAKE_CASE , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_SCREAMING_SNAKE_CASE , 1 , 3 ) * fracs[:, :, 1:] ) UpperCamelCase = directions / directions.norm(dim=-1 , keepdim=_SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.stack( [ torch.broadcast_to(self.origin.view(_SCREAMING_SNAKE_CASE , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , 2 , 3 ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> "DifferentiableProjectiveCamera": """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , x_fov=self.x_fov , y_fov=self.y_fov , ) def lowercase__ ( __UpperCamelCase )-> DifferentiableProjectiveCamera: UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): UpperCamelCase = np.array([np.sin(__UpperCamelCase ), np.cos(__UpperCamelCase ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) UpperCamelCase = -z * 4 UpperCamelCase = np.array([np.cos(__UpperCamelCase ), -np.sin(__UpperCamelCase ), 0.0] ) UpperCamelCase = np.cross(__UpperCamelCase , __UpperCamelCase ) origins.append(__UpperCamelCase ) xs.append(__UpperCamelCase ) ys.append(__UpperCamelCase ) zs.append(__UpperCamelCase ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(__UpperCamelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__UpperCamelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__UpperCamelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__UpperCamelCase , axis=0 ) ).float() , width=__UpperCamelCase , height=__UpperCamelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__UpperCamelCase )) , )
321
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') @dataclass class a_ : lowercase = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} ) lowercase = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = {} if self.train_dir is not None: UpperCamelCase = self.train_dir if self.validation_dir is not None: UpperCamelCase = self.validation_dir UpperCamelCase = data_files if data_files else None @dataclass class a_ : lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) lowercase = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) lowercase = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class a_ ( lowerCamelCase ): lowercase = field( default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def lowercase__ ( __UpperCamelCase )-> int: UpperCamelCase = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def lowercase__ ( )-> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase = training_args.get_process_log_level() logger.setLevel(__UpperCamelCase ) transformers.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. 
UpperCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __UpperCamelCase ) and data_args.train_val_split > 0.0: UpperCamelCase = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase = split["""train"""] UpperCamelCase = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__UpperCamelCase ) elif model_args.model_name_or_path: UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: UpperCamelCase = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(F"New config: {config}" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__UpperCamelCase ) elif model_args.model_name_or_path: UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: UpperCamelCase = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase = ViTMAEForPreTraining(__UpperCamelCase ) if training_args.do_train: UpperCamelCase = ds["""train"""].column_names else: UpperCamelCase = ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase = data_args.image_column_name elif "image" in column_names: UpperCamelCase = """image""" elif "img" in column_names: 
UpperCamelCase = """img""" else: UpperCamelCase = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase = image_processor.size["""shortest_edge"""] else: UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase = Compose( [ Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__UpperCamelCase ): UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__UpperCamelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__UpperCamelCase ) # Compute absolute learning rate UpperCamelCase = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: UpperCamelCase = None if training_args.resume_from_checkpoint is not None: UpperCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase = last_checkpoint UpperCamelCase = trainer.train(resume_from_checkpoint=__UpperCamelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase = trainer.evaluate() trainer.log_metrics("""eval""" , __UpperCamelCase ) trainer.save_metrics("""eval""" , __UpperCamelCase ) # Write model card and (optionally) push to hub UpperCamelCase = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**__UpperCamelCase ) else: trainer.create_model_card(**__UpperCamelCase ) def lowercase__ ( __UpperCamelCase )-> List[str]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
321
1
from __future__ import annotations


def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return the given decimal as a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor (Euclid's algorithm).
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
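# Hand-worked example for decimal_to_fraction above (illustrative only):
# 6.25 has two fractional digits, giving 625/100; dividing both by their
# greatest common divisor, 25, yields the reduced pair (25, 4).
assert decimal_to_fraction(6.25) == (25, 4)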
321
'''simple docstring''' import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE__ = 2_5_6 class a_ ( lowerCamelCase ): lowercase = ["""melgan"""] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__() # From MELGAN UpperCamelCase = math.log(1e-5 ) # Matches MelGAN training. UpperCamelCase = 4.0 # Largest value for most examples UpperCamelCase = 128 self.register_modules( notes_encoder=_SCREAMING_SNAKE_CASE , continuous_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , melgan=_SCREAMING_SNAKE_CASE , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" UpperCamelCase ,UpperCamelCase = output_range if clip: UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , self.min_value , self.max_value ) # Scale to [0, 1]. UpperCamelCase = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = input_range UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if clip else outputs # Scale to [0, 1]. UpperCamelCase = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" UpperCamelCase = input_tokens > 0 UpperCamelCase ,UpperCamelCase = self.notes_encoder( encoder_input_tokens=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase = self.continuous_encoder( encoder_inputs=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" UpperCamelCase = noise_time if not torch.is_tensor(_SCREAMING_SNAKE_CASE ): UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0: UpperCamelCase = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCamelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) UpperCamelCase = self.decoder( encodings_and_masks=_SCREAMING_SNAKE_CASE , decoder_input_tokens=_SCREAMING_SNAKE_CASE , decoder_noise_time=_SCREAMING_SNAKE_CASE ) return logits @torch.no_grad() def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "numpy" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , ) -> Union[AudioPipelineOutput, Tuple]: """simple docstring""" if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(_SCREAMING_SNAKE_CASE )}." ) UpperCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) UpperCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa ) UpperCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) for i, encoder_input_tokens in enumerate(_SCREAMING_SNAKE_CASE ): if i == 0: UpperCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. UpperCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
UpperCamelCase = ones UpperCamelCase = self.scale_features( _SCREAMING_SNAKE_CASE , output_range=[-1.0, 1.0] , clip=_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_SCREAMING_SNAKE_CASE , continuous_mask=_SCREAMING_SNAKE_CASE , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop UpperCamelCase = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCamelCase = self.decode( encodings_and_masks=_SCREAMING_SNAKE_CASE , input_tokens=_SCREAMING_SNAKE_CASE , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 UpperCamelCase = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = self.scale_to_features(_SCREAMING_SNAKE_CASE , input_range=[-1.0, 1.0] ) UpperCamelCase = mel[:1] UpperCamelCase = mel.cpu().float().numpy() UpperCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info("""Generated segment""" , _SCREAMING_SNAKE_CASE ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" ) elif output_type == "numpy" and self.melgan is None: raise ValueError( """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" ) if output_type == "numpy": UpperCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: UpperCamelCase = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_SCREAMING_SNAKE_CASE )
321
1
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A partition count is "perfect" when it is a power of two; check this by
    testing whether the closed-form exponent is integral."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
321
1
'''simple docstring'''

import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    # The parameter sums of the original and the converted model should agree
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
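# Usage sketch (added for illustration; the script file name and checkpoint
# paths below are hypothetical, only the argparse flags come from the code
# above):
#
#   python convert_flava_checkpoint.py \
#       --checkpoint_path ./flava_full.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf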
'''simple docstring'''


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid if no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
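# Illustration (added): for a triangle graph two colors are not enough but
# three are, so `color` returns an empty list in the first case and a valid
# assignment in the second.
#
#   >>> triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#   >>> color(triangle, 2)
#   []
#   >>> color(triangle, 3)
#   [0, 1, 2]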
'''simple docstring'''

from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they violate the given direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
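# Illustration (added): the sorting network assumes the input length is a
# power of two; direction 1 sorts ascending, 0 descending.
#
#   >>> data = [12, 42, -21, 1]
#   >>> bitonic_sort(data, 0, len(data), 1)
#   >>> data
#   [-21, 1, 12, 42]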
'''simple docstring'''


def solution(n: int = 2000000) -> int:
    """Return the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f'{solution() = }')
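# Illustration (added): the primes below 10 are 2, 3, 5 and 7, which sum to 17.
#
#   >>> solution(10)
#   17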
'''simple docstring'''

import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last ones."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
'''simple docstring'''

from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # Clearing the lowest set bit takes one iteration per set bit
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code comparing the two counting functions on different int values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
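# Illustration (added): 25 is 0b11001, so both counting strategies agree on
# 3 set bits.
#
#   >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
#   3
#   >>> get_set_bits_count_using_modulo_operator(25)
#   3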
'''simple docstring'''

from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
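# Usage sketch (added for illustration): the map supports the usual
# MutableMapping protocol, with open addressing and automatic resizing.
#
#   >>> hm = HashMap()
#   >>> hm["key_a"] = "val_a"
#   >>> hm["key_b"] = "val_b"
#   >>> len(hm)
#   2
#   >>> hm["key_a"]
#   'val_a'
#   >>> del hm["key_b"]
#   >>> "key_b" in hm
#   False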
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''

import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 224, "dropout_rate": 0.2, "dw_padding": []},
    "b1": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 240, "dropout_rate": 0.2, "dw_padding": [16]},
    "b2": {"hidden_dim": 1408, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 260, "dropout_rate": 0.3, "dw_padding": [5, 8, 16]},
    "b3": {"hidden_dim": 1536, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 300, "dropout_rate": 0.3, "dw_padding": [5, 18]},
    "b4": {"hidden_dim": 1792, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 380, "dropout_rate": 0.4, "dw_padding": [6]},
    "b5": {"hidden_dim": 2048, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 456, "dropout_rate": 0.4, "dw_padding": [13, 27]},
    "b6": {"hidden_dim": 2304, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 528, "dropout_rate": 0.5, "dw_padding": [31]},
    "b7": {"hidden_dim": 2560, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 600, "dropout_rate": 0.5, "dw_padding": [18]},
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
'''simple docstring'''

import math


def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        # handling of negative values of initial intensity
        raise ValueError("The value of intensity cannot be negative")
    if angle < 0 or angle > 360:
        # handling of values out of the allowed range
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
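# Illustration (added): with the analyzer at 0 degrees to the polarizer no
# intensity is lost, since I = I0 * cos(0)^2 = I0.
#
#   >>> malus_law(100, 0)
#   100.0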
'''simple docstring'''

import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
'''simple docstring'''

from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
'''simple docstring'''


def least_divisible_repunit(divisor: int) -> int:
    """Return the number of digits of the smallest repunit divisible by divisor (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f'{solution() = }')
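# Illustration (added): 111111 = 7 * 15873 is the smallest repunit divisible
# by 7, and it has six digits.
#
#   >>> least_divisible_repunit(7)
#   6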
'''simple docstring'''

from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
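# Usage sketch (added for illustration; assumes a transformers runtime where
# the tokenizer can be instantiated directly like this):
#
#   >>> tokenizer = CanineTokenizer()
#   >>> tokenizer._convert_token_to_id("a")
#   97
#   >>> tokenizer._convert_id_to_token(0xE000)
#   '[CLS]'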
'''simple docstring'''

from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given two of resistance, reactance and impedance, compute the missing one (passed as 0)."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
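# Illustration (added): a 3-4-5 right triangle of resistance and reactance
# gives an impedance of 5 ohms.
#
#   >>> electrical_impedance(3, 4, 0)
#   {'impedance': 5.0}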
'''simple docstring'''

import collections
import importlib.util
import os
import re
from pathlib import Path

PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the
    TYPE_CHECKING objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall("\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
) return errors def lowercase__ ( )-> Optional[Any]: UpperCamelCase = [] for root, _, files in os.walk(__UpperCamelCase ): if "__init__.py" in files: UpperCamelCase = os.path.join(__UpperCamelCase , """__init__.py""" ) UpperCamelCase = parse_init(__UpperCamelCase ) if objects is not None: UpperCamelCase = analyze_results(*__UpperCamelCase ) if len(__UpperCamelCase ) > 0: UpperCamelCase = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append("""\n""".join(__UpperCamelCase ) ) if len(__UpperCamelCase ) > 0: raise ValueError("""\n\n""".join(__UpperCamelCase ) ) def lowercase__ ( )-> Optional[Any]: UpperCamelCase = [] for path, directories, files in os.walk(__UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(__UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__UpperCamelCase ) / folder).glob("""*.py""" ) ) ) == 0: continue UpperCamelCase = str((Path(__UpperCamelCase ) / folder).relative_to(__UpperCamelCase ) ) UpperCamelCase = short_path.replace(os.path.sep , """.""" ) submodules.append(__UpperCamelCase ) for fname in files: if fname == "__init__.py": continue UpperCamelCase = str((Path(__UpperCamelCase ) / fname).relative_to(__UpperCamelCase ) ) UpperCamelCase = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(__UpperCamelCase ) return submodules SCREAMING_SNAKE_CASE__ = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def lowercase__ ( )-> List[Any]: # This is to make sure the transformers module imported is the one in the repo. UpperCamelCase = importlib.util.spec_from_file_location( """transformers""" , os.path.join(__UpperCamelCase , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) UpperCamelCase = spec.loader.load_module() UpperCamelCase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(__UpperCamelCase ) > 0: UpperCamelCase = """\n""".join(F"- {module}" for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" F"{list_of_modules}\n" """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
321
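A small, self-contained illustration of how the backend-detection regexes in the init-checker row above compose; the test line fed in below is hypothetical.

import re

# Copied from the row above: matches is_xxx_available() and the guarding `if not ...` line.
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

def find_backend(line: str):
    # Only lines of the form `if not is_xxx_available()...` declare a backend block.
    if _re_test_backend.search(line) is None:
        return None
    backends = sorted(b[0] for b in _re_backend.findall(line))
    return "_and_".join(backends)

print(find_backend("    if not is_torch_available() or not is_vision_available():"))
# -> 'torch_and_vision'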
'''simple docstring''' # Algorithm for the pigeonhole sorting def lowercase__ ( __UpperCamelCase )-> Union[str, Any]: UpperCamelCase = min(__UpperCamelCase ) # min() finds the minimum value UpperCamelCase = max(__UpperCamelCase ) # max() finds the maximum value UpperCamelCase = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size UpperCamelCase = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. UpperCamelCase = 0 for count in range(__UpperCamelCase ): while holes[count] > 0: holes[count] -= 1 UpperCamelCase = count + min_val i += 1 def lowercase__ ( )-> Any: UpperCamelCase = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(__UpperCamelCase ) print("""Sorted order is:""" , """ """.join([str(i) for i in __UpperCamelCase] ) ) if __name__ == "__main__": main()
321
1
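The same pigeonhole sort as the row above, rewritten as a runnable sketch with readable names (again my substitutions); it runs in O(n + range) time and assumes integer input.

def pigeonhole_sort(values: list) -> None:
    if not values:
        return
    lo, hi = min(values), max(values)
    holes = [0] * (hi - lo + 1)       # one counting hole per possible value
    for v in values:
        holes[v - lo] += 1            # drop each value into its hole
    i = 0
    for offset, count in enumerate(holes):
        for _ in range(count):        # empty the holes back out in order
            values[i] = offset + lo
            i += 1

data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)
print(" ".join(str(x) for x in data))  # 2 3 4 6 7 8 8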
'''simple docstring''' import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=5 )-> List[Any]: # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count("""<mask>""" ) == 1 UpperCamelCase = torch.tensor(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) ).unsqueeze(0 ) # Batch size 1 UpperCamelCase = model(__UpperCamelCase )[0] # The last hidden-state is the first element of the output tuple UpperCamelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() UpperCamelCase = logits[0, masked_index, :] UpperCamelCase = logits.softmax(dim=0 ) UpperCamelCase ,UpperCamelCase = prob.topk(k=__UpperCamelCase , dim=0 ) UpperCamelCase = """ """.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__UpperCamelCase ) )] ) UpperCamelCase = tokenizer.mask_token UpperCamelCase = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ): UpperCamelCase = predicted_token_bpe.replace("""\u2581""" , """ """ ) if " {0}".format(__UpperCamelCase ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(""" {0}""".format(__UpperCamelCase ) , __UpperCamelCase ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(__UpperCamelCase , __UpperCamelCase ), values[index].item(), predicted_token, ) ) return topk_filled_outputs SCREAMING_SNAKE_CASE__ = CamembertTokenizer.from_pretrained('camembert-base') SCREAMING_SNAKE_CASE__ = CamembertForMaskedLM.from_pretrained('camembert-base') model.eval() SCREAMING_SNAKE_CASE__ = 'Le camembert est <mask> :)' print(fill_mask(masked_input, model, tokenizer, topk=3))
321
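A hedged sketch of just the top-k selection step at the heart of the fill-mask row above, run on made-up logits so no model download is needed.

import torch

logits = torch.tensor([2.0, 0.5, 1.0, 3.0])   # hypothetical scores for 4 candidate tokens
probs = logits.softmax(dim=0)                 # convert scores to probabilities
values, indices = probs.topk(k=3, dim=0)      # keep the best 3 candidates, as in the row above
for p, idx in zip(values.tolist(), indices.tolist()):
    print(f"token id {idx} with probability {p:.3f}")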
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class a_ ( lowerCamelCase ): lowercase = (DDPMParallelScheduler,) def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = { """num_train_timesteps""": 1000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**_SCREAMING_SNAKE_CASE ) return config def A__ ( self ) -> List[str]: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> str: """simple docstring""" self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , ) def A__ ( self ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def A__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = self.dummy_sample_deter + 0.1 UpperCamelCase = self.dummy_sample_deter - 0.1 UpperCamelCase = samplea.shape[0] UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE ) UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert 
abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ): if i == len(_SCREAMING_SNAKE_CASE ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE ) UpperCamelCase = prev_t.item() self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , 
timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _SCREAMING_SNAKE_CASE , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
321
1
'''simple docstring''' import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params SCREAMING_SNAKE_CASE__ = getLogger(__name__) SCREAMING_SNAKE_CASE__ = 'cuda' if torch.cuda.is_available() else 'cpu' def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 8 , __UpperCamelCase = DEFAULT_DEVICE , __UpperCamelCase=False , __UpperCamelCase="summarization" , __UpperCamelCase=None , **__UpperCamelCase , )-> Dict: UpperCamelCase = Path(__UpperCamelCase ).open("""w""" , encoding="""utf-8""" ) UpperCamelCase = str(__UpperCamelCase ) UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase ) if fpaa: UpperCamelCase = model.half() UpperCamelCase = AutoTokenizer.from_pretrained(__UpperCamelCase ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. UpperCamelCase = time.time() # update config with task specific params use_task_specific_params(__UpperCamelCase , __UpperCamelCase ) if prefix is None: UpperCamelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """""" for examples_chunk in tqdm(list(chunks(__UpperCamelCase , __UpperCamelCase ) ) ): UpperCamelCase = [prefix + text for text in examples_chunk] UpperCamelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , truncation=__UpperCamelCase , padding="""longest""" ).to(__UpperCamelCase ) UpperCamelCase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **__UpperCamelCase , ) UpperCamelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase ) for hypothesis in dec: fout.write(hypothesis + """\n""" ) fout.flush() fout.close() UpperCamelCase = int(time.time() - start_time ) # seconds UpperCamelCase = len(__UpperCamelCase ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def lowercase__ ( )-> str: return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" ) def lowercase__ ( __UpperCamelCase=True )-> str: UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""model_name""" , type=__UpperCamelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""input_path""" , type=__UpperCamelCase , help="""like cnn_dm/test.source""" ) parser.add_argument("""save_path""" , type=__UpperCamelCase , help="""where to save summaries""" ) parser.add_argument("""--reference_path""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""like cnn_dm/test.target""" ) parser.add_argument("""--score_path""" , type=__UpperCamelCase , required=__UpperCamelCase , default="""metrics.json""" , help="""where to save metrics""" ) parser.add_argument("""--device""" , type=__UpperCamelCase , required=__UpperCamelCase , default=__UpperCamelCase , help="""cuda, cuda:1, cpu etc.""" ) parser.add_argument( """--prefix""" , type=__UpperCamelCase , required=__UpperCamelCase , default=__UpperCamelCase , help="""will be added to the begininng of src examples""" ) parser.add_argument("""--task""" , type=__UpperCamelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" ) 
parser.add_argument("""--bs""" , type=__UpperCamelCase , default=8 , required=__UpperCamelCase , help="""batch size""" ) parser.add_argument( """--n_obs""" , type=__UpperCamelCase , default=-1 , required=__UpperCamelCase , help="""How many observations. Defaults to all.""" ) parser.add_argument("""--fp16""" , action="""store_true""" ) parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" ) parser.add_argument( """--info""" , nargs="""?""" , type=__UpperCamelCase , const=datetime_now() , help=( """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g.""" """ lang=en-ru. If no value is passed, the current datetime string will be used.""" ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate UpperCamelCase ,UpperCamelCase = parser.parse_known_args() UpperCamelCase = parse_numeric_n_bool_cl_kwargs(__UpperCamelCase ) if parsed_args and verbose: print(F"parsed the following generate kwargs: {parsed_args}" ) UpperCamelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: UpperCamelCase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=__UpperCamelCase ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("""Can't mix --fp16 and --device cpu""" ) UpperCamelCase = generate_summaries_or_translations( __UpperCamelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **__UpperCamelCase , ) if args.reference_path is None: return {} # Compute scores UpperCamelCase = calculate_bleu if """translation""" in args.task else calculate_rouge UpperCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()] UpperCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__UpperCamelCase )] UpperCamelCase = score_fn(__UpperCamelCase , __UpperCamelCase ) scores.update(__UpperCamelCase ) if args.dump_args: scores.update(__UpperCamelCase ) if args.info: UpperCamelCase = args.info if verbose: print(__UpperCamelCase ) if args.score_path is not None: json.dump(__UpperCamelCase , open(args.score_path , """w""" ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
321
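The generation loop in the row above batches inputs with a `chunks` helper imported from the repo's utils; a minimal equivalent, assuming it simply yields fixed-size slices, would be:

def chunks(seq, batch_size):
    # Yield successive batch_size-sized slices of seq; the last one may be short.
    for i in range(0, len(seq), batch_size):
        yield seq[i : i + batch_size]

print(list(chunks(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]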
'''simple docstring''' from __future__ import annotations import math class a_ : def __init__( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" UpperCamelCase = size # approximate the overall size of segment tree with given value UpperCamelCase = [0 for i in range(0 , 4 * size )] # create array to store lazy update UpperCamelCase = [0 for i in range(0 , 4 * size )] UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return idx * 2 def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return idx * 2 + 1 def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" if left_element == right_element: UpperCamelCase = a[left_element - 1] else: UpperCamelCase = (left_element + right_element) // 2 self.build(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.build(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = max( self.segment_tree[self.left(_SCREAMING_SNAKE_CASE )] , self.segment_tree[self.right(_SCREAMING_SNAKE_CASE )] ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" if self.flag[idx] is True: UpperCamelCase = self.lazy[idx] UpperCamelCase = False if left_element != right_element: UpperCamelCase = self.lazy[idx] UpperCamelCase = self.lazy[idx] UpperCamelCase = True UpperCamelCase = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: UpperCamelCase = val if left_element != right_element: UpperCamelCase = val UpperCamelCase = val UpperCamelCase = True UpperCamelCase = True return True UpperCamelCase = (left_element + right_element) // 2 self.update(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.update(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = max( self.segment_tree[self.left(_SCREAMING_SNAKE_CASE )] , self.segment_tree[self.right(_SCREAMING_SNAKE_CASE )] ) return True def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int | float: """simple docstring""" if self.flag[idx] is True: UpperCamelCase = self.lazy[idx] UpperCamelCase = False if left_element != right_element: UpperCamelCase = self.lazy[idx] UpperCamelCase = self.lazy[idx] UpperCamelCase = True UpperCamelCase = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] UpperCamelCase = (left_element + right_element) // 2 UpperCamelCase = self.query(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = self.query(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __str__( self ) -> str: """simple docstring""" return str([self.query(1 , 1 , self.size , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) 
for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] SCREAMING_SNAKE_CASE__ = 1_5 SCREAMING_SNAKE_CASE__ = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
321
1
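A brute-force cross-check, as a sketch, of what the lazy segment tree above computes: range-assignment updates followed by range-max queries over 1-based positions.

A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
print(max(A[3:6]))   # query(4, 6) -> 7, matching the tree's first query
A[0:3] = [111] * 3   # update(1, 3, 111): assign 111 to positions 1..3
print(max(A))        # query(1, 15) after the update -> 111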
'''simple docstring''' from timeit import timeit def lowercase__ ( __UpperCamelCase )-> int: if number < 0: raise ValueError("""the value of input must not be negative""" ) UpperCamelCase = 0 while number: number &= number - 1 result += 1 return result def lowercase__ ( __UpperCamelCase )-> int: if number < 0: raise ValueError("""the value of input must not be negative""" ) UpperCamelCase = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def lowercase__ ( )-> None: def do_benchmark(__UpperCamelCase ) -> None: UpperCamelCase = """import __main__ as z""" print(F"Benchmark when {number = }:" ) print(F"{get_set_bits_count_using_modulo_operator(__UpperCamelCase ) = }" ) UpperCamelCase = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=__UpperCamelCase ) print(F"timeit() runs in {timing} seconds" ) print(F"{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCamelCase ) = }" ) UpperCamelCase = timeit( F"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=__UpperCamelCase , ) print(F"timeit() runs in {timing} seconds" ) for number in (25, 37, 58, 0): do_benchmark(__UpperCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
321
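Both counting strategies from the row above, condensed into one runnable check; Brian Kernighan's trick clears the lowest set bit per iteration, so it loops once per set bit rather than once per bit.

def popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1   # drop the lowest set bit
        count += 1
    return count

assert popcount_kernighan(25) == bin(25).count("1") == 3  # 25 = 0b11001
print(popcount_kernighan(25))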
'''simple docstring''' def lowercase__ ( __UpperCamelCase = 1000 )-> int: UpperCamelCase = -1 UpperCamelCase = 0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c UpperCamelCase = (n * n - 2 * a * n) // (2 * n - 2 * a) UpperCamelCase = n - a - b if c * c == (a * a + b * b): UpperCamelCase = a * b * c if candidate >= product: UpperCamelCase = candidate return product if __name__ == "__main__": print(f'{solution() = }')
321
1
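A worked check of the closed form the row above relies on: from a + b + c = n and a^2 + b^2 = c^2, substituting c = n - a - b and simplifying gives b = (n^2 - 2an) / (2n - 2a).

n, a = 1000, 200
b = (n * n - 2 * a * n) // (2 * n - 2 * a)
c = n - a - b
print(a, b, c)                 # 200 375 425
print(a * a + b * b == c * c)  # True: it really is a Pythagorean triplet
print(a * b * c)               # 31875000, the product the row's solution() returns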
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class a_ : lowercase = 42 lowercase = 42 class a_ : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" UpperCamelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )] UpperCamelCase = size def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Iterator[Edge]: """simple docstring""" return iter(self._graph[vertex] ) @property def A__ ( self ) -> Tuple: """simple docstring""" return self._size def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int | None: """simple docstring""" UpperCamelCase = deque([start_vertex] ) UpperCamelCase = [None] * self.size UpperCamelCase = 0 while queue: UpperCamelCase = queue.popleft() UpperCamelCase = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: UpperCamelCase = current_distance + edge.weight UpperCamelCase = distances[edge.destination_vertex] if ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and new_distance >= dest_vertex_distance ): continue UpperCamelCase = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
321
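A compact deque-based 0-1 BFS, sketched with plain adjacency lists instead of the row's Edge/graph classes; weight-0 edges go on the front of the deque and weight-1 edges on the back, so vertices are settled in nondecreasing distance order.

from collections import deque

adj = {0: [(1, 0), (2, 1)], 1: [(2, 1)], 2: []}  # vertex -> [(destination, weight in {0, 1})]
dist = {0: 0}
queue = deque([0])
while queue:
    u = queue.popleft()
    for v, w in adj[u]:
        if dist[u] + w < dist.get(v, float("inf")):
            dist[v] = dist[u] + w
            if w == 0:
                queue.appendleft(v)   # 0-edges stay on the current distance level
            else:
                queue.append(v)       # 1-edges move one level down
print(dist[2])  # shortest weighted distance from 0 to 2 -> 1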
'''simple docstring''' import argparse import struct import unittest class a_ : def __init__( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" UpperCamelCase = data # Initialize hash values UpperCamelCase = [ 0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A, 0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19, ] # Initialize round constants UpperCamelCase = [ 0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5, 0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5, 0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3, 0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74, 0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC, 0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA, 0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7, 0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67, 0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13, 0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85, 0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3, 0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70, 0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5, 0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3, 0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08, 0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2, ] UpperCamelCase = self.preprocessing(self.data ) self.final_hash() @staticmethod def A__ ( _SCREAMING_SNAKE_CASE ) -> bytes: """simple docstring""" UpperCamelCase = B"""\x80""" + (B"""\x00""" * (63 - (len(_SCREAMING_SNAKE_CASE ) + 8) % 64)) UpperCamelCase = struct.pack(""">Q""" , (len(_SCREAMING_SNAKE_CASE ) * 8) ) return data + padding + big_endian_integer def A__ ( self ) -> None: """simple docstring""" UpperCamelCase = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCamelCase = list(struct.unpack(""">16L""" , _SCREAMING_SNAKE_CASE ) ) # add 48 0-ed integers words += [0] * 48 UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array UpperCamelCase = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) UpperCamelCase = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) UpperCamelCase = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression UpperCamelCase = self.ror(_SCREAMING_SNAKE_CASE , 6 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 11 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 25 ) UpperCamelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g) UpperCamelCase = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 UpperCamelCase = self.ror(_SCREAMING_SNAKE_CASE , 2 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 13 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 22 ) UpperCamelCase = (a & b) ^ (a & c) ^ (b & c) UpperCamelCase = (sa + maj) % 0x1_00_00_00_00 UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) UpperCamelCase = [a, b, c, d, e, f, g, h] # Modify final values UpperCamelCase = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] UpperCamelCase = """""".join([hex(_SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for value in self.hashes] ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations) class a_ ( unittest.TestCase ): def A__ ( self ) -> None: """simple docstring""" import hashlib UpperCamelCase = bytes("""Test String""" , """utf-8""" ) self.assertEqual(SHAaaa(_SCREAMING_SNAKE_CASE ).hash , hashlib.shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() ) def lowercase__ ( )-> None: import doctest doctest.testmod() UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: UpperCamelCase = f.read() else: UpperCamelCase = bytes(__UpperCamelCase , """utf-8""" ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
321
1
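A sketch of just the preprocessing (padding) step mirrored from the SHA-256 class above: append 0x80, zero-pad so the length is 56 mod 64, then append the bit length as a big-endian 64-bit integer.

import struct

def sha256_pad(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    length = struct.pack(">Q", len(data) * 8)
    padded = data + padding + length
    assert len(padded) % 64 == 0   # the message must split evenly into 512-bit blocks
    return padded

print(len(sha256_pad(b"Test String")))  # 64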
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ = { 'configuration_efficientnet': [ 'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EfficientNetConfig', 'EfficientNetOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['EfficientNetImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'EfficientNetForImageClassification', 'EfficientNetModel', 'EfficientNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
321
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) SCREAMING_SNAKE_CASE__ = _symbol_database.Default() SCREAMING_SNAKE_CASE__ = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) SCREAMING_SNAKE_CASE__ = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" SCREAMING_SNAKE_CASE__ = 4_5 SCREAMING_SNAKE_CASE__ = 1_5_8_1 SCREAMING_SNAKE_CASE__ = 1_5_1_7 SCREAMING_SNAKE_CASE__ = 1_5_7_0 SCREAMING_SNAKE_CASE__ = 1_5_8_4 SCREAMING_SNAKE_CASE__ = 1_7_9_3 SCREAMING_SNAKE_CASE__ = 1_7_9_5 SCREAMING_SNAKE_CASE__ = 1_9_1_6 SCREAMING_SNAKE_CASE__ = 1_8_6_4 SCREAMING_SNAKE_CASE__ = 1_9_0_5 SCREAMING_SNAKE_CASE__ = 1_9_1_9 SCREAMING_SNAKE_CASE__ = 2_4_2_9 SCREAMING_SNAKE_CASE__ = 2_2_0_8 SCREAMING_SNAKE_CASE__ = 2_4_1_8 SCREAMING_SNAKE_CASE__ = 2_3_2_3 SCREAMING_SNAKE_CASE__ = 2_4_0_7 # @@protoc_insertion_point(module_scope)
321
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = { 'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimesformerModel', 'TimesformerForVideoClassification', 'TimesformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
321
'''simple docstring''' SCREAMING_SNAKE_CASE__ = 8.31_44_62 # Unit - J mol-1 K-1 def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float: if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float: if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
321
1
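A worked example of the ideal gas law the row above wraps (PV = nRT, all SI units); the scenario numbers are illustrative.

UNIVERSAL_GAS_CONSTANT = 8.314462  # J mol^-1 K^-1

moles, kelvin, volume = 1.0, 273.15, 0.0224  # ~1 mol at 0 degrees C in 22.4 L
pressure = moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
print(f"{pressure:.0f} Pa")  # about 1.01e5 Pa, i.e. roughly 1 atm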
'''simple docstring''' import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class a_ ( lowerCamelCase , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class a_ ( unittest.TestCase ): @property def A__ ( self ) -> Dict: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = ort.SessionOptions() UpperCamelCase = False return options def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) UpperCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) UpperCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCamelCase = """A red cat sitting on a park bench""" UpperCamelCase = np.random.RandomState(0 ) UpperCamelCase = pipe( prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type="""np""" , ) UpperCamelCase = output.images UpperCamelCase = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCamelCase = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) UpperCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) UpperCamelCase = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" ) UpperCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) UpperCamelCase = """A red cat sitting on a park bench""" UpperCamelCase = np.random.RandomState(0 ) UpperCamelCase = pipe( prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=20 , generator=_SCREAMING_SNAKE_CASE , output_type="""np""" , ) UpperCamelCase = output.images UpperCamelCase = images[0, 
255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCamelCase = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
321
'''simple docstring''' import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE__ = importlib.util.find_spec('s3fs') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( __UpperCamelCase )-> str: if "://" in dataset_path: UpperCamelCase = dataset_path.split("""://""" )[1] return dataset_path def lowercase__ ( __UpperCamelCase )-> bool: if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int: UpperCamelCase = not is_remote_filesystem(__UpperCamelCase ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__UpperCamelCase ) , fs._strip_protocol(__UpperCamelCase ) ) else: fs.mv(__UpperCamelCase , __UpperCamelCase , recursive=__UpperCamelCase ) def lowercase__ ( )-> None: if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: UpperCamelCase = None UpperCamelCase = None UpperCamelCase = threading.Lock()
321
1
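A tiny sketch of the path helper defined in the row above: strip an fsspec-style protocol prefix and leave local paths untouched.

def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path

print(extract_path_from_uri("s3://bucket/dataset"))  # bucket/dataset
print(extract_path_from_uri("relative/path"))        # relative/path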
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE__ = [ 'small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base', ] SCREAMING_SNAKE_CASE__ = { 'vocab_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json', 'funnel-transformer/small-base': ( 'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json' ), 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json', 'funnel-transformer/large-base': ( 'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json' ), 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json' ), }, } SCREAMING_SNAKE_CASE__ = {f'funnel-transformer/{name}': 5_1_2 for name in _model_names} SCREAMING_SNAKE_CASE__ = {f'funnel-transformer/{name}': {'do_lower_case': True} for name in _model_names} class a_ ( lowerCamelCase ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = FunnelTokenizer lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = 2 def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<sep>" , 
_SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="##" , **_SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__( _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , clean_text=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , wordpieces_prefix=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , _SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get("""strip_accents""" , _SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) ) UpperCamelCase = do_lower_case UpperCamelCase = strip_accents UpperCamelCase = tokenize_chinese_chars UpperCamelCase = normalizer_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = do_lower_case def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Dict: """simple docstring""" UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" UpperCamelCase = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE ) return tuple(_SCREAMING_SNAKE_CASE )
321
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = { 'configuration_xlm_roberta_xl': [ 'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaXLConfig', 'XLMRobertaXLOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaXLForCausalLM', 'XLMRobertaXLForMaskedLM', 'XLMRobertaXLForMultipleChoice', 'XLMRobertaXLForQuestionAnswering', 'XLMRobertaXLForSequenceClassification', 'XLMRobertaXLForTokenClassification', 'XLMRobertaXLModel', 'XLMRobertaXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
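The _LazyModule indirection above means no heavy module is imported until an attribute is first touched; a small sketch of the observable behaviour (assuming a recent transformers with torch installed, so the model symbols resolve):

import transformers

# The configuration module is only imported when this attribute is accessed.
config = transformers.XLMRobertaXLConfig()
print(config.model_type)  # "xlm-roberta-xl"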
321
1
'''simple docstring'''
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Mask the positions where the two bit strings differ; more than one
    # difference means the two terms cannot be merged.
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant marks that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Then greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
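A short non-interactive sketch driving the helpers above the same way main() does, using integer minterms so the bit strings come out clean (main() itself reads floats, a quirk kept from the source):

minterms = [1, 5, 7]                      # f(a, b, c) = sum of minterms 1, 5, 7
binary = decimal_to_binary(3, minterms)
print(binary)                             # ['001', '101', '111']
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))  # essential prime implicants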
321
'''simple docstring''' import argparse from collections import defaultdict import yaml SCREAMING_SNAKE_CASE__ = 'docs/source/en/_toctree.yml' def lowercase__ ( __UpperCamelCase )-> Optional[Any]: UpperCamelCase = defaultdict(__UpperCamelCase ) UpperCamelCase = [] UpperCamelCase = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__UpperCamelCase ) UpperCamelCase = new_doc_list UpperCamelCase = [key for key, value in counts.items() if value > 1] UpperCamelCase = [] for duplicate_key in duplicates: UpperCamelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__UpperCamelCase ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) UpperCamelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__UpperCamelCase ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__UpperCamelCase ) # Sort return overview_doc def lowercase__ ( __UpperCamelCase=False )-> List[str]: with open(__UpperCamelCase , encoding="""utf-8""" ) as f: UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCamelCase = content[api_idx]["""sections"""] # Then to the model doc UpperCamelCase = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 UpperCamelCase = api_doc[scheduler_idx]["""sections"""] UpperCamelCase = clean_doc_toc(__UpperCamelCase ) UpperCamelCase = False if new_scheduler_doc != scheduler_doc: UpperCamelCase = True if overwrite: UpperCamelCase = new_scheduler_doc if diff: if overwrite: UpperCamelCase = api_doc with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def lowercase__ ( __UpperCamelCase=False )-> Tuple: with open(__UpperCamelCase , encoding="""utf-8""" ) as f: UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCamelCase = content[api_idx]["""sections"""] # Then to the model doc UpperCamelCase = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 UpperCamelCase = False UpperCamelCase = api_doc[pipeline_idx]["""sections"""] UpperCamelCase = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: UpperCamelCase = pipeline_doc["""section"""] UpperCamelCase = clean_doc_toc(__UpperCamelCase ) if overwrite: UpperCamelCase = new_sub_pipeline_doc new_pipeline_docs.append(__UpperCamelCase ) # sort overall pipeline doc UpperCamelCase = clean_doc_toc(__UpperCamelCase ) if new_pipeline_docs != pipeline_docs: UpperCamelCase = True if overwrite: UpperCamelCase = new_pipeline_docs if diff: if overwrite: 
UpperCamelCase = api_doc with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') SCREAMING_SNAKE_CASE__ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
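A toy run of the de-duplicating sort helper above (upstream it is named clean_doc_toc; the name is an assumption here since this dump strips identifiers). "Overview" entries are pulled to the front and the rest are sorted by title:

docs = [
    {"local": "api/pipelines/ddpm", "title": "DDPM"},
    {"local": "api/pipelines/overview", "title": "Overview"},
    {"local": "api/pipelines/ddim", "title": "DDIM"},
]
print(clean_doc_toc(docs))
# [{'local': 'api/pipelines/overview', 'title': 'Overview'},
#  {'local': 'api/pipelines/ddim', 'title': 'DDIM'},
#  {'local': 'api/pipelines/ddpm', 'title': 'DDPM'}]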
321
1
'''simple docstring''' import pickle import numpy as np from matplotlib import pyplot as plt class a_ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.2 , _SCREAMING_SNAKE_CASE=0.2 ) -> Tuple: """simple docstring""" UpperCamelCase = bp_numa UpperCamelCase = bp_numa UpperCamelCase = bp_numa UpperCamelCase = conva_get[:2] UpperCamelCase = conva_get[2] UpperCamelCase = size_pa UpperCamelCase = rate_w UpperCamelCase = rate_t UpperCamelCase = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCamelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase = -2 * np.random.rand(self.conva[1] ) + 1 UpperCamelCase = -2 * np.random.rand(self.num_bpa ) + 1 UpperCamelCase = -2 * np.random.rand(self.num_bpa ) + 1 def A__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" UpperCamelCase = { """num_bp1""": self.num_bpa, """num_bp2""": self.num_bpa, """num_bp3""": self.num_bpa, """conv1""": self.conva, """step_conv1""": self.step_conva, """size_pooling1""": self.size_poolinga, """rate_weight""": self.rate_weight, """rate_thre""": self.rate_thre, """w_conv1""": self.w_conva, """wkj""": self.wkj, """vji""": self.vji, """thre_conv1""": self.thre_conva, """thre_bp2""": self.thre_bpa, """thre_bp3""": self.thre_bpa, } with open(_SCREAMING_SNAKE_CASE , """wb""" ) as f: pickle.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(F"Model saved: {save_path}" ) @classmethod def A__ ( cls , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f: UpperCamelCase = pickle.load(_SCREAMING_SNAKE_CASE ) # noqa: S301 UpperCamelCase = model_dic.get("""conv1""" ) conv_get.append(model_dic.get("""step_conv1""" ) ) UpperCamelCase = model_dic.get("""size_pooling1""" ) UpperCamelCase = model_dic.get("""num_bp1""" ) UpperCamelCase = model_dic.get("""num_bp2""" ) UpperCamelCase = model_dic.get("""num_bp3""" ) UpperCamelCase = model_dic.get("""rate_weight""" ) UpperCamelCase = model_dic.get("""rate_thre""" ) # create model instance UpperCamelCase = CNN(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # modify model parameter UpperCamelCase = model_dic.get("""w_conv1""" ) UpperCamelCase = model_dic.get("""wkj""" ) UpperCamelCase = model_dic.get("""vji""" ) UpperCamelCase = model_dic.get("""thre_conv1""" ) UpperCamelCase = model_dic.get("""thre_bp2""" ) UpperCamelCase = model_dic.get("""thre_bp3""" ) return conv_ins def A__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return 1 / (1 + np.exp(-1 * x )) def A__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return round(_SCREAMING_SNAKE_CASE , 3 ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" UpperCamelCase = convs[0] UpperCamelCase = convs[1] UpperCamelCase = np.shape(_SCREAMING_SNAKE_CASE )[0] # get the data slice of original image data, data_focus UpperCamelCase = [] for i_focus in range(0 , size_data - size_conv + 1 , _SCREAMING_SNAKE_CASE ): for j_focus in range(0 , size_data - size_conv + 1 , _SCREAMING_SNAKE_CASE ): UpperCamelCase = data[ 
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_SCREAMING_SNAKE_CASE ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCamelCase = [] UpperCamelCase = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_SCREAMING_SNAKE_CASE ): UpperCamelCase = [] for i_focus in range(len(_SCREAMING_SNAKE_CASE ) ): UpperCamelCase = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = np.asmatrix(_SCREAMING_SNAKE_CASE ).reshape( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) data_featuremap.append(_SCREAMING_SNAKE_CASE ) # expanding the data slice to One dimenssion UpperCamelCase = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE ) return focus_list, data_featuremap def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="average_pool" ) -> Optional[int]: """simple docstring""" UpperCamelCase = len(featuremaps[0] ) UpperCamelCase = int(size_map / size_pooling ) UpperCamelCase = [] for i_map in range(len(_SCREAMING_SNAKE_CASE ) ): UpperCamelCase = featuremaps[i_map] UpperCamelCase = [] for i_focus in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for j_focus in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCamelCase = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_SCREAMING_SNAKE_CASE ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = np.asmatrix(_SCREAMING_SNAKE_CASE ).reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) featuremap_pooled.append(_SCREAMING_SNAKE_CASE ) return featuremap_pooled def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" UpperCamelCase = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): UpperCamelCase = np.shape(data[i] ) UpperCamelCase = data[i].reshape(1 , shapes[0] * shapes[1] ) UpperCamelCase = data_listed.getA().tolist()[0] data_expanded.extend(_SCREAMING_SNAKE_CASE ) UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE ) return data_expanded def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE ) UpperCamelCase = np.shape(_SCREAMING_SNAKE_CASE ) UpperCamelCase = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" UpperCamelCase = [] UpperCamelCase = 0 for i_map in range(_SCREAMING_SNAKE_CASE ): UpperCamelCase = np.ones((size_map, size_map) ) for i in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for j in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCamelCase = pd_pool[ i_pool ] UpperCamelCase = i_pool + 1 UpperCamelCase = np.multiply( _SCREAMING_SNAKE_CASE , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_SCREAMING_SNAKE_CASE ) return pd_all def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=bool ) -> List[str]: """simple docstring""" print("""----------------------Start Training-------------------------""" ) print((""" - - 
Shape: Train_Data """, np.shape(_SCREAMING_SNAKE_CASE )) ) print((""" - - Shape: Teach_Data """, np.shape(_SCREAMING_SNAKE_CASE )) ) UpperCamelCase = 0 UpperCamelCase = [] UpperCamelCase = 10000 while rp < n_repeat and mse >= error_accuracy: UpperCamelCase = 0 print(F"-------------Learning Time {rp}--------------" ) for p in range(len(_SCREAMING_SNAKE_CASE ) ): # print('------------Learning Image: %d--------------'%p) UpperCamelCase = np.asmatrix(datas_train[p] ) UpperCamelCase = np.asarray(datas_teach[p] ) UpperCamelCase ,UpperCamelCase = self.convolute( _SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase = self.pooling(_SCREAMING_SNAKE_CASE , self.size_poolinga ) UpperCamelCase = np.shape(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self._expand(_SCREAMING_SNAKE_CASE ) UpperCamelCase = data_bp_input UpperCamelCase = np.dot(_SCREAMING_SNAKE_CASE , self.vji.T ) - self.thre_bpa UpperCamelCase = self.sig(_SCREAMING_SNAKE_CASE ) UpperCamelCase = np.dot(_SCREAMING_SNAKE_CASE , self.wkj.T ) - self.thre_bpa UpperCamelCase = self.sig(_SCREAMING_SNAKE_CASE ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCamelCase = np.multiply( (data_teach - bp_outa) , np.multiply(_SCREAMING_SNAKE_CASE , (1 - bp_outa) ) ) UpperCamelCase = np.multiply( np.dot(_SCREAMING_SNAKE_CASE , self.wkj ) , np.multiply(_SCREAMING_SNAKE_CASE , (1 - bp_outa) ) ) UpperCamelCase = np.dot(_SCREAMING_SNAKE_CASE , self.vji ) UpperCamelCase = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCamelCase = pd_conva_pooled.T.getA().tolist() UpperCamelCase = self._calculate_gradient_from_pool( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCamelCase = self._expand_mat(pd_conva_all[k_conv] ) UpperCamelCase = self.rate_weight * np.dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCamelCase = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCamelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCamelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCamelCase = self.thre_bpa - pd_k_all * self.rate_thre UpperCamelCase = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCamelCase = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCamelCase = rp + 1 UpperCamelCase = error_count / patterns all_mse.append(_SCREAMING_SNAKE_CASE ) def draw_error(): UpperCamelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_SCREAMING_SNAKE_CASE , """+-""" ) plt.plot(_SCREAMING_SNAKE_CASE , """r--""" ) plt.xlabel("""Learning Times""" ) plt.ylabel("""All_mse""" ) plt.grid(_SCREAMING_SNAKE_CASE , alpha=0.5 ) plt.show() print("""------------------Training Complished---------------------""" ) print((""" - - Training epoch: """, rp, F" - - Mse: {mse:.6f}") ) if draw_e: draw_error() return mse def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" UpperCamelCase = [] print("""-------------------Start Testing-------------------------""" ) print((""" - - Shape: Test_Data """, np.shape(_SCREAMING_SNAKE_CASE )) ) for p in 
range(len(_SCREAMING_SNAKE_CASE ) ): UpperCamelCase = np.asmatrix(datas_test[p] ) UpperCamelCase ,UpperCamelCase = self.convolute( _SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase = self.pooling(_SCREAMING_SNAKE_CASE , self.size_poolinga ) UpperCamelCase = self._expand(_SCREAMING_SNAKE_CASE ) UpperCamelCase = data_bp_input UpperCamelCase = bp_outa * self.vji.T - self.thre_bpa UpperCamelCase = self.sig(_SCREAMING_SNAKE_CASE ) UpperCamelCase = bp_outa * self.wkj.T - self.thre_bpa UpperCamelCase = self.sig(_SCREAMING_SNAKE_CASE ) produce_out.extend(bp_outa.getA().tolist() ) UpperCamelCase = [list(map(self.do_round , _SCREAMING_SNAKE_CASE ) ) for each in produce_out] return np.asarray(_SCREAMING_SNAKE_CASE ) def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" UpperCamelCase = np.asmatrix(_SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase = self.convolute( _SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase = self.pooling(_SCREAMING_SNAKE_CASE , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
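The pooling method above averages non-overlapping size_pooling x size_pooling windows of each feature map (the "average_pool" branch); a minimal NumPy equivalent of that step on one 4x4 map with 2x2 pooling:

import numpy as np

fmap = np.arange(16, dtype=float).reshape(4, 4)
# Split into 2x2 blocks and average each block, as the average_pool branch does.
pooled = fmap.reshape(2, 2, 2, 2).mean(axis=(1, 3))
print(pooled)  # [[ 2.5  4.5] [10.5 12.5]]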
321
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]: UpperCamelCase = 1.5 UpperCamelCase = int(factor * num_class_images ) UpperCamelCase = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 ) os.makedirs(F"{class_data_dir}/images" , exist_ok=__UpperCamelCase ) if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images: return while True: UpperCamelCase = client.query(text=__UpperCamelCase ) if len(__UpperCamelCase ) >= factor * num_class_images or num_images > 1E4: break else: UpperCamelCase = int(factor * num_images ) UpperCamelCase = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 , ) UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = tqdm(desc="""downloading real regularization images""" , total=__UpperCamelCase ) with open(F"{class_data_dir}/caption.txt" , """w""" ) as fa, open(F"{class_data_dir}/urls.txt" , """w""" ) as fa, open( F"{class_data_dir}/images.txt" , """w""" ) as fa: while total < num_class_images: UpperCamelCase = class_images[count] count += 1 try: UpperCamelCase = requests.get(images["""url"""] ) if img.status_code == 200: UpperCamelCase = Image.open(BytesIO(img.content ) ) with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowercase__ ( )-> str: UpperCamelCase = argparse.ArgumentParser("""""" , add_help=__UpperCamelCase ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__UpperCamelCase , type=__UpperCamelCase ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__UpperCamelCase , type=__UpperCamelCase ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=__UpperCamelCase ) return parser.parse_args() if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
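Example invocation of the retrieval entry point above; the prompt, output directory and image count map one-to-one onto the argparse flags (the paths are placeholders):

#   python retrieve.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./class_data/dog --num_class_images 200
# or, driven directly from the module:
retrieve("a photo of a dog", "./class_data/dog", 200)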
321
1
'''simple docstring'''
from __future__ import annotations

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int,
    remainder: int,
    digits: list[int],
    length: int,
) -> int:
    if remaining_length == 0:
        # A reversible number can neither start nor end with 0.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Every digit of n + reverse(n) must be odd.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            # The middle digit is added to itself in n + reverse(n).
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        # Fill the outermost free pair: digit1 on the right, digit2 on the left.
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
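Small consistency check for the counter above: 120 of the numbers below one thousand are reversible, so restricting the search to at most three digits should reproduce that figure.

print(solution(3))  # 120: no 1-digit, 20 two-digit and 100 three-digit reversible numbers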
321
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') @dataclass class a_ : lowercase = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} ) lowercase = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = {} if self.train_dir is not None: UpperCamelCase = self.train_dir if self.validation_dir is not None: UpperCamelCase = self.validation_dir UpperCamelCase = data_files if data_files else None @dataclass class a_ : lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) lowercase = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) lowercase = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class a_ ( lowerCamelCase ): lowercase = field( default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def lowercase__ ( __UpperCamelCase )-> int: UpperCamelCase = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def lowercase__ ( )-> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase = training_args.get_process_log_level() logger.setLevel(__UpperCamelCase ) transformers.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. 
UpperCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __UpperCamelCase ) and data_args.train_val_split > 0.0: UpperCamelCase = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase = split["""train"""] UpperCamelCase = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__UpperCamelCase ) elif model_args.model_name_or_path: UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: UpperCamelCase = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(F"New config: {config}" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__UpperCamelCase ) elif model_args.model_name_or_path: UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: UpperCamelCase = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase = ViTMAEForPreTraining(__UpperCamelCase ) if training_args.do_train: UpperCamelCase = ds["""train"""].column_names else: UpperCamelCase = ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase = data_args.image_column_name elif "image" in column_names: UpperCamelCase = """image""" elif "img" in column_names: 
UpperCamelCase = """img""" else: UpperCamelCase = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase = image_processor.size["""shortest_edge"""] else: UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase = Compose( [ Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__UpperCamelCase ): UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__UpperCamelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__UpperCamelCase ) # Compute absolute learning rate UpperCamelCase = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: UpperCamelCase = None if training_args.resume_from_checkpoint is not None: UpperCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase = last_checkpoint UpperCamelCase = trainer.train(resume_from_checkpoint=__UpperCamelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase = trainer.evaluate() trainer.log_metrics("""eval""" , __UpperCamelCase ) trainer.save_metrics("""eval""" , __UpperCamelCase ) # Write model card and (optionally) push to hub UpperCamelCase = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**__UpperCamelCase ) else: trainer.create_model_card(**__UpperCamelCase ) def lowercase__ ( __UpperCamelCase )-> List[str]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
321
1
'''simple docstring''' import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(4_2) SCREAMING_SNAKE_CASE__ = 'bert-base-cased' SCREAMING_SNAKE_CASE__ = 'fp16' SCREAMING_SNAKE_CASE__ = 'bf16' SCREAMING_SNAKE_CASE__ = [FPaa, BFaa] @require_fsdp @require_cuda class a_ ( lowerCamelCase ): def A__ ( self ) -> List[str]: """simple docstring""" super().setUp() UpperCamelCase = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def A__ ( self ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(_SCREAMING_SNAKE_CASE ): UpperCamelCase = self.dist_env.copy() UpperCamelCase = F"{i + 1}" UpperCamelCase = strategy with mockenv_context(**_SCREAMING_SNAKE_CASE ): UpperCamelCase = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def A__ ( self ) -> str: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(_SCREAMING_SNAKE_CASE ): UpperCamelCase = self.dist_env.copy() UpperCamelCase = prefetch_policy with mockenv_context(**_SCREAMING_SNAKE_CASE ): UpperCamelCase = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def A__ ( self ) -> List[Any]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(_SCREAMING_SNAKE_CASE ): UpperCamelCase = self.dist_env.copy() UpperCamelCase = state_dict_type with mockenv_context(**_SCREAMING_SNAKE_CASE ): UpperCamelCase = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE ) for policy in FSDP_AUTO_WRAP_POLICY: UpperCamelCase = self.dist_env.copy() UpperCamelCase = policy if policy == "TRANSFORMER_BASED_WRAP": UpperCamelCase = """BertLayer""" elif policy == "SIZE_BASED_WRAP": UpperCamelCase = """2000""" with mockenv_context(**_SCREAMING_SNAKE_CASE ): UpperCamelCase = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(_SCREAMING_SNAKE_CASE ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) UpperCamelCase = self.dist_env.copy() UpperCamelCase = """TRANSFORMER_BASED_WRAP""" UpperCamelCase = """T5Layer""" with 
mockenv_context(**_SCREAMING_SNAKE_CASE ): UpperCamelCase = FullyShardedDataParallelPlugin() with self.assertRaises(_SCREAMING_SNAKE_CASE ) as cm: fsdp_plugin.set_auto_wrap_policy(_SCREAMING_SNAKE_CASE ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) UpperCamelCase = self.dist_env.copy() UpperCamelCase = """SIZE_BASED_WRAP""" UpperCamelCase = """0""" with mockenv_context(**_SCREAMING_SNAKE_CASE ): UpperCamelCase = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(_SCREAMING_SNAKE_CASE ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def A__ ( self ) -> Any: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: UpperCamelCase = self.dist_env.copy() UpperCamelCase = mp_dtype with mockenv_context(**_SCREAMING_SNAKE_CASE ): UpperCamelCase = Accelerator() if mp_dtype == "fp16": UpperCamelCase = torch.floataa elif mp_dtype == "bf16": UpperCamelCase = torch.bfloataa UpperCamelCase = MixedPrecision(param_dtype=_SCREAMING_SNAKE_CASE , reduce_dtype=_SCREAMING_SNAKE_CASE , buffer_dtype=_SCREAMING_SNAKE_CASE ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _SCREAMING_SNAKE_CASE ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , _SCREAMING_SNAKE_CASE ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: UpperCamelCase = self.dist_env.copy() UpperCamelCase = str(_SCREAMING_SNAKE_CASE ).lower() with mockenv_context(**_SCREAMING_SNAKE_CASE ): UpperCamelCase = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_SCREAMING_SNAKE_CASE ) ) @require_fsdp @require_multi_gpu @slow class a_ ( lowerCamelCase ): def A__ ( self ) -> Union[str, Any]: """simple docstring""" super().setUp() UpperCamelCase = 0.8_2 UpperCamelCase = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] UpperCamelCase = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } UpperCamelCase = 160 UpperCamelCase = 160 UpperCamelCase = inspect.getfile(accelerate.test_utils ) UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = os.path.join(self.test_scripts_folder , """test_performance.py""" ) UpperCamelCase = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: UpperCamelCase = cmd.copy() for i, strategy in enumerate(_SCREAMING_SNAKE_CASE ): if strategy.lower() in config: cmd_config.append(F"--fsdp_sharding_strategy={i+1}" ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, F"--output_dir={self.tmpdir}", F"--performance_lower_bound={self.performance_lower_bound}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() ) def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) UpperCamelCase = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(_SCREAMING_SNAKE_CASE ): UpperCamelCase = cmd.copy() cmd_config.append(F"--fsdp_sharding_strategy={i+1}" ) if strategy != "FULL_SHARD": continue UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) for state_dict_type in FSDP_STATE_DICT_TYPE: UpperCamelCase = cmd_config[:state_dict_config_index] cmd_config.append(F"--fsdp_state_dict_type={state_dict_type}" ) cmd_config.extend( [ self.test_file_path, F"--output_dir={self.tmpdir}", """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() ) UpperCamelCase = cmd_config[:-1] UpperCamelCase = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ F"--resume_from_checkpoint={resume_from_checkpoint}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() ) def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) UpperCamelCase = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): UpperCamelCase = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, strategy in enumerate(_SCREAMING_SNAKE_CASE ): if strategy.lower() in spec: 
cmd_config.append(F"--fsdp_sharding_strategy={i+1}" ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, F"--output_dir={self.tmpdir}", F"--peak_memory_upper_bound={peak_mem_upper_bound}", F"--n_train={self.n_train}", F"--n_val={self.n_val}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() )
321
'''simple docstring''' import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE__ = 2_5_6 class a_ ( lowerCamelCase ): lowercase = ["""melgan"""] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__() # From MELGAN UpperCamelCase = math.log(1e-5 ) # Matches MelGAN training. UpperCamelCase = 4.0 # Largest value for most examples UpperCamelCase = 128 self.register_modules( notes_encoder=_SCREAMING_SNAKE_CASE , continuous_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , melgan=_SCREAMING_SNAKE_CASE , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" UpperCamelCase ,UpperCamelCase = output_range if clip: UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , self.min_value , self.max_value ) # Scale to [0, 1]. UpperCamelCase = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = input_range UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if clip else outputs # Scale to [0, 1]. UpperCamelCase = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" UpperCamelCase = input_tokens > 0 UpperCamelCase ,UpperCamelCase = self.notes_encoder( encoder_input_tokens=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase = self.continuous_encoder( encoder_inputs=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" UpperCamelCase = noise_time if not torch.is_tensor(_SCREAMING_SNAKE_CASE ): UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0: UpperCamelCase = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCamelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) UpperCamelCase = self.decoder( encodings_and_masks=_SCREAMING_SNAKE_CASE , decoder_input_tokens=_SCREAMING_SNAKE_CASE , decoder_noise_time=_SCREAMING_SNAKE_CASE ) return logits @torch.no_grad() def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "numpy" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , ) -> Union[AudioPipelineOutput, Tuple]: """simple docstring""" if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(_SCREAMING_SNAKE_CASE )}." ) UpperCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) UpperCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa ) UpperCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) for i, encoder_input_tokens in enumerate(_SCREAMING_SNAKE_CASE ): if i == 0: UpperCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. UpperCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
UpperCamelCase = ones UpperCamelCase = self.scale_features( _SCREAMING_SNAKE_CASE , output_range=[-1.0, 1.0] , clip=_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_SCREAMING_SNAKE_CASE , continuous_mask=_SCREAMING_SNAKE_CASE , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop UpperCamelCase = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCamelCase = self.decode( encodings_and_masks=_SCREAMING_SNAKE_CASE , input_tokens=_SCREAMING_SNAKE_CASE , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 UpperCamelCase = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = self.scale_to_features(_SCREAMING_SNAKE_CASE , input_range=[-1.0, 1.0] ) UpperCamelCase = mel[:1] UpperCamelCase = mel.cpu().float().numpy() UpperCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info("""Generated segment""" , _SCREAMING_SNAKE_CASE ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" ) elif output_type == "numpy" and self.melgan is None: raise ValueError( """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" ) if output_type == "numpy": UpperCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: UpperCamelCase = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_SCREAMING_SNAKE_CASE )
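End-to-end usage of the pipeline above, as published with the google/music-spectrogram-diffusion weights; MidiProcessor converts a MIDI file into the note-token chunks __call__ expects (the file name is a placeholder, and the "numpy" output path needs onnxruntime for the MelGAN vocoder):

from diffusers import MidiProcessor, SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
processor = MidiProcessor()
output = pipe(processor("example.mid"))  # path is a placeholder
audio = output.audios[0]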
321
1
'''simple docstring''' import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL SCREAMING_SNAKE_CASE__ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11') def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , )-> int: output_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , use_external_data_format=__UpperCamelCase , enable_onnx_checker=__UpperCamelCase , opset_version=__UpperCamelCase , ) else: export( __UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , opset_version=__UpperCamelCase , ) @torch.no_grad() def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False )-> int: UpperCamelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): UpperCamelCase = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: UpperCamelCase = """cpu""" UpperCamelCase = Path(__UpperCamelCase ) # VAE DECODER UpperCamelCase = AutoencoderKL.from_pretrained(model_path + """/vae""" ) UpperCamelCase = vae_decoder.config.latent_channels # forward only through the decoder part UpperCamelCase = vae_decoder.decode onnx_export( __UpperCamelCase , model_args=( torch.randn(1 , __UpperCamelCase , 25 , 25 ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=__UpperCamelCase , ) del vae_decoder if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '--model_path', type=str, required=True, help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).', ) parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.') parser.add_argument( '--opset', default=1_4, type=int, help='The version of the ONNX operator set to use.', ) parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode') SCREAMING_SNAKE_CASE__ = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print('SD: Done: ONNX')
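Example invocation of the converter above (the script name and both paths are placeholders; --fp16 requires a CUDA device, as enforced in the export function):

#   python convert_vae_decoder_onnx.py \
#       --model_path ./stable-diffusion-v1-5 \
#       --output_path ./sd_onnx \
#       --opset 14 --fp16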
321
'''simple docstring'''


def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f'{solution() = }')
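# Since every third Fibonacci number is even, the even terms satisfy the
# recurrence E(k) = 4*E(k-1) + E(k-2) (2, 8, 34, 144, ...). A hedged alternative
# sketch (`solution_even_only` is our own name) that sums the even terms
# directly, skipping the odd ones entirely:
def solution_even_only(n: int = 4000000) -> int:
    prev, cur = 2, 8
    total = 2 if n >= 2 else 0
    while cur <= n:
        total += cur
        prev, cur = cur, 4 * cur + prev
    return total


assert solution_even_only(100) == 2 + 8 + 34  # the even Fibonacci terms up to 100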
321
1
'''simple docstring''' import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS} def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str: if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." ) if tokenizer_name is None: UpperCamelCase = TOKENIZER_CLASSES else: UpperCamelCase = {tokenizer_name: getattr(__UpperCamelCase , tokenizer_name + """Fast""" )} logger.info(F"Loading tokenizer classes: {tokenizer_names}" ) for tokenizer_name in tokenizer_names: UpperCamelCase = TOKENIZER_CLASSES[tokenizer_name] UpperCamelCase = True if checkpoint_name is None: UpperCamelCase = list(tokenizer_class.max_model_input_sizes.keys() ) else: UpperCamelCase = [checkpoint_name] logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" ) for checkpoint in checkpoint_names: logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" ) # Load tokenizer UpperCamelCase = tokenizer_class.from_pretrained(__UpperCamelCase , force_download=__UpperCamelCase ) # Save fast tokenizer logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" ) # For organization names we create sub-directories if "/" in checkpoint: UpperCamelCase ,UpperCamelCase = checkpoint.split("""/""" ) UpperCamelCase = os.path.join(__UpperCamelCase , __UpperCamelCase ) elif add_prefix: UpperCamelCase = checkpoint UpperCamelCase = dump_path else: UpperCamelCase = None UpperCamelCase = dump_path logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: UpperCamelCase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] UpperCamelCase = file_path.split(__UpperCamelCase )[-1][0] if next_char == "/": UpperCamelCase = os.path.join(__UpperCamelCase , __UpperCamelCase ) UpperCamelCase = None logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" ) UpperCamelCase = tokenizer.save_pretrained( __UpperCamelCase , legacy_format=__UpperCamelCase , filename_prefix=__UpperCamelCase ) logger.info(F"=> File names {file_names}" ) for file_name in file_names: if not file_name.endswith("""tokenizer.json""" ): os.remove(__UpperCamelCase ) logger.info(F"=> removing {file_name}" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.' ) parser.add_argument( '--tokenizer_name', default=None, type=str, help=( f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ' 'download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--checkpoint_name', default=None, type=str, help='Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.', ) parser.add_argument( '--force_download', action='store_true', help='Re-download checkpoints.', ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
321
'''simple docstring'''


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour of the vertex uses the given color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most max_colors colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
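# A small usage sketch for the backtracking colorer above; the 5-vertex
# adjacency matrix (1 marks an edge) is made up for illustration.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 1, 0, 0],
        [1, 0, 1, 0, 0],
        [1, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 0, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # [0, 1, 2, 0, 1]; [] would mean not 3-colorable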
321
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ = { 'configuration_conditional_detr': [ 'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConditionalDetrConfig', 'ConditionalDetrOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['ConditionalDetrFeatureExtractor'] SCREAMING_SNAKE_CASE__ = ['ConditionalDetrImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConditionalDetrForObjectDetection', 'ConditionalDetrForSegmentation', 'ConditionalDetrModel', 'ConditionalDetrPreTrainedModel', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
321
'''simple docstring'''


def solution(n: int = 2000000) -> int:
    """Return the sum of all primes below n, via a sieve of Eratosthenes."""
    # 0 marks "still assumed prime", 1 marks "known composite"
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f'{solution() = }')
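# The sieve above spends a full Python int per slot; a bytearray runs the same
# O(n log log n) algorithm with far less memory and overhead. A sketch
# (`solution_bytearray` is our own name, not part of the original module):
def solution_bytearray(n: int = 2000000) -> int:
    is_composite = bytearray(n)
    total = 0
    for i in range(2, n):
        if not is_composite[i]:
            total += i
            for j in range(i * i, n, i):
                is_composite[j] = 1
    return total


assert solution_bytearray(10) == 2 + 3 + 5 + 7  # primes below 10 sum to 17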
321
1
'''simple docstring''' import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class a_ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=17 , _SCREAMING_SNAKE_CASE=23 , _SCREAMING_SNAKE_CASE=11 , _SCREAMING_SNAKE_CASE=True , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = act_dim UpperCamelCase = state_dim UpperCamelCase = hidden_size UpperCamelCase = max_length UpperCamelCase = is_training def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 ) UpperCamelCase = random_attention_mask((self.batch_size, self.seq_length) ) UpperCamelCase = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def A__ ( self ) -> Optional[Any]: """simple docstring""" return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" UpperCamelCase = DecisionTransformerModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = { """states""": states, """actions""": actions, """rewards""": rewards, """returns_to_go""": returns_to_go, """timesteps""": timesteps, """attention_mask""": attention_mask, } return config, inputs_dict @require_torch class a_ ( lowerCamelCase , 
lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase = (DecisionTransformerModel,) if is_torch_available() else () lowercase = () lowercase = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids lowercase = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = DecisionTransformerModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def A__ ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) @slow def A__ ( self ) -> Optional[Any]: """simple docstring""" for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = DecisionTransformerModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = [ """states""", """actions""", """rewards""", """returns_to_go""", """timesteps""", """attention_mask""", ] self.assertListEqual(arg_names[: len(_SCREAMING_SNAKE_CASE )] , _SCREAMING_SNAKE_CASE ) @require_torch class a_ ( unittest.TestCase ): @slow def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = 2 # number of steps of autoregressive prediction we will perform UpperCamelCase = 10 # defined by the RL environment, may be normalized UpperCamelCase = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" ) UpperCamelCase = model.to(_SCREAMING_SNAKE_CASE ) UpperCamelCase = model.config torch.manual_seed(0 ) UpperCamelCase = torch.randn(1 , 1 , config.state_dim ).to(device=_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) # env.reset() UpperCamelCase = torch.tensor( [[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=_SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCamelCase = state UpperCamelCase = torch.zeros(1 , 0 , config.act_dim , device=_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) UpperCamelCase = torch.zeros(1 , 0 , device=_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) UpperCamelCase = torch.tensor(0 , device=_SCREAMING_SNAKE_CASE , dtype=torch.long ).reshape(1 , 1 ) for step in range(_SCREAMING_SNAKE_CASE ): UpperCamelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_SCREAMING_SNAKE_CASE )] , dim=1 ) UpperCamelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=_SCREAMING_SNAKE_CASE )] , dim=1 ) UpperCamelCase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , 
device=states.device ) with torch.no_grad(): UpperCamelCase ,UpperCamelCase ,UpperCamelCase = model( states=_SCREAMING_SNAKE_CASE , actions=_SCREAMING_SNAKE_CASE , rewards=_SCREAMING_SNAKE_CASE , returns_to_go=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=_SCREAMING_SNAKE_CASE , dtype=torch.floataa ), 1.0, False, {}, ) UpperCamelCase = action_pred[0, -1] UpperCamelCase = torch.cat([states, state] , dim=1 ) UpperCamelCase = returns_to_go[0, -1] - reward UpperCamelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCamelCase = torch.cat( [timesteps, torch.ones((1, 1) , device=_SCREAMING_SNAKE_CASE , dtype=torch.long ) * (step + 1)] , dim=1 )
321
'''simple docstring'''

from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the lowest bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
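# For reference, CPython already ships fast population counts that agree with
# both hand-rolled functions above; on Python >= 3.10, int.bit_count() is the
# idiomatic choice.
assert bin(25).count("1") == 3  # 25 == 0b11001
# assert (25).bit_count() == 3  # Python 3.10+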
321
1
'''simple docstring''' import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '▁' SCREAMING_SNAKE_CASE__ = {'vocab_file': 'prophetnet.tokenizer'} SCREAMING_SNAKE_CASE__ = { 'vocab_file': { 'microsoft/xprophetnet-large-wiki100-cased': ( 'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer' ), } } SCREAMING_SNAKE_CASE__ = { 'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False}, } SCREAMING_SNAKE_CASE__ = { 'microsoft/xprophetnet-large-wiki100-cased': 5_1_2, } def lowercase__ ( __UpperCamelCase )-> List[str]: UpperCamelCase = collections.OrderedDict() with open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as reader: UpperCamelCase = reader.readlines() for index, token in enumerate(__UpperCamelCase ): UpperCamelCase = token.rstrip("""\n""" ) UpperCamelCase = index return vocab class a_ ( lowerCamelCase ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["""input_ids""", """attention_mask"""] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , ) try: import sentencepiece as spm except ImportError: logger.warning( """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece""" """ pip install sentencepiece""" ) raise UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab UpperCamelCase = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4} for i in range(10 ): UpperCamelCase = F"[unused{i}]" UpperCamelCase = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab UpperCamelCase = 12 UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(_SCREAMING_SNAKE_CASE ) def __getstate__( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.__dict__.copy() UpperCamelCase = None return state def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" UpperCamelCase = d try: import sentencepiece as spm except ImportError: logger.warning( """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece""" """ pip install sentencepiece""" ) raise # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCamelCase = {} UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE ) if token_ids_a is None: return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def A__ ( self ) -> Any: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE ) def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" UpperCamelCase = """""".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return UpperCamelCase = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if 
filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi: UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(_SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.sep_token_id] UpperCamelCase = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
321
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
321
1
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class a_ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ) -> Optional[int]: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = initializer_range UpperCamelCase = use_labels UpperCamelCase = scope def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = self.get_config() return config, input_ids, input_mask, token_labels def A__ ( self ) -> Optional[int]: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" ( ( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) , ) = self.prepare_config_and_inputs() UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" UpperCamelCase = BertGenerationEncoder(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" UpperCamelCase = True UpperCamelCase = BertGenerationEncoder(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , ) UpperCamelCase = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" UpperCamelCase = True UpperCamelCase = True UpperCamelCase = BertGenerationDecoder(config=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ).eval() # first forward pass UpperCamelCase = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE , ) UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , )["""hidden_states"""][0] UpperCamelCase = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , )["""hidden_states"""][0] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" UpperCamelCase = BertGenerationDecoder(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 
self.prepare_config_and_inputs() UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = BertGenerationEncoderTester(self ) UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs() UpperCamelCase = """bert""" self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" ( ( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) ,( UpperCamelCase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase = None self.model_tester.create_and_check_model_as_decoder( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) def A__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*_SCREAMING_SNAKE_CASE ) @slow def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @require_torch class a_ ( unittest.TestCase ): @slow def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) UpperCamelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): UpperCamelCase = model(_SCREAMING_SNAKE_CASE )[0] UpperCamelCase = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor( [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class a_ ( unittest.TestCase ): @slow def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = 
BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) UpperCamelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): UpperCamelCase = model(_SCREAMING_SNAKE_CASE )[0] UpperCamelCase = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor( [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
321
'''simple docstring'''

import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity I = I0 * cos^2(angle) behind a polarizer."""
    if initial_intensity < 0:
        # handling of negative values of initial intensity
        raise ValueError("The value of intensity cannot be negative")
    if angle < 0 or angle > 360:
        # handling of values out of the allowed range
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name='malus_law')
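# Quick sanity checks for malus_law above: a polarizer aligned with the field
# passes everything, and a 60-degree offset passes cos^2(60 deg) = 1/4.
assert malus_law(100.0, 0) == 100.0
assert abs(malus_law(100.0, 60) - 25.0) < 1e-9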
321
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = { 'configuration_clap': [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapAudioConfig', 'ClapConfig', 'ClapTextConfig', ], 'processing_clap': ['ClapProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapModel', 'ClapPreTrainedModel', 'ClapTextModel', 'ClapTextModelWithProjection', 'ClapAudioModel', 'ClapAudioModelWithProjection', ] SCREAMING_SNAKE_CASE__ = ['ClapFeatureExtractor'] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
321
'''simple docstring''' import datasets from .evaluate import evaluate SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def A__ ( self ) -> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": { """id""": datasets.Value("""string""" ), """prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ), }, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , ) def A__ ( self 
, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} UpperCamelCase = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE ) return score
321
1
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def lowercase__ ( __UpperCamelCase , __UpperCamelCase=None )-> Optional[Any]: UpperCamelCase = None if token is not None: UpperCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} UpperCamelCase = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" UpperCamelCase = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json() UpperCamelCase = {} try: job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) UpperCamelCase = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(__UpperCamelCase ): UpperCamelCase = requests.get(url + F"&page={i + 2}" , headers=__UpperCamelCase ).json() job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return job_links except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def lowercase__ ( __UpperCamelCase , __UpperCamelCase=None )-> List[str]: UpperCamelCase = None if token is not None: UpperCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} UpperCamelCase = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" UpperCamelCase = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json() UpperCamelCase = {} try: artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) UpperCamelCase = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(__UpperCamelCase ): UpperCamelCase = requests.get(url + F"&page={i + 2}" , headers=__UpperCamelCase ).json() artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) return artifacts except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str: UpperCamelCase = None if token is not None: UpperCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} UpperCamelCase = requests.get(__UpperCamelCase , headers=__UpperCamelCase , allow_redirects=__UpperCamelCase ) UpperCamelCase = result.headers["""Location"""] UpperCamelCase = requests.get(__UpperCamelCase , allow_redirects=__UpperCamelCase ) UpperCamelCase = os.path.join(__UpperCamelCase , F"{artifact_name}.zip" ) with open(__UpperCamelCase , """wb""" ) as fp: fp.write(response.content ) def lowercase__ ( __UpperCamelCase , __UpperCamelCase=None )-> str: UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = None with zipfile.ZipFile(__UpperCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(__UpperCamelCase ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(__UpperCamelCase ) as f: for line in f: UpperCamelCase = line.decode("""UTF-8""" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs UpperCamelCase = line[: line.index(""": """ )] UpperCamelCase = line[line.index(""": """ ) + len(""": """ ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and 
line.startswith("""FAILED """ ): # `test` is the test method that failed UpperCamelCase = line[len("""FAILED """ ) :] failed_tests.append(__UpperCamelCase ) elif filename == "job_name.txt": UpperCamelCase = line if len(__UpperCamelCase ) != len(__UpperCamelCase ): raise ValueError( F"`errors` and `failed_tests` should have the same number of elements. Got {len(__UpperCamelCase )} for `errors` " F"and {len(__UpperCamelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" """ problem.""" ) UpperCamelCase = None if job_name and job_links: UpperCamelCase = job_links.get(__UpperCamelCase , __UpperCamelCase ) # A list with elements of the form (line of error, error, failed test) UpperCamelCase = [x + [y] + [job_link] for x, y in zip(__UpperCamelCase , __UpperCamelCase )] return result def lowercase__ ( __UpperCamelCase , __UpperCamelCase=None )-> int: UpperCamelCase = [] UpperCamelCase = [os.path.join(__UpperCamelCase , __UpperCamelCase ) for p in os.listdir(__UpperCamelCase ) if p.endswith(""".zip""" )] for p in paths: errors.extend(get_errors_from_single_artifact(__UpperCamelCase , job_links=__UpperCamelCase ) ) return errors def lowercase__ ( __UpperCamelCase , __UpperCamelCase=None )-> Optional[Any]: UpperCamelCase = Counter() counter.update([x[1] for x in logs] ) UpperCamelCase = counter.most_common() UpperCamelCase = {} for error, count in counts: if error_filter is None or error not in error_filter: UpperCamelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]} UpperCamelCase = dict(sorted(r.items() , key=lambda __UpperCamelCase : item[1]["count"] , reverse=__UpperCamelCase ) ) return r def lowercase__ ( __UpperCamelCase )-> Union[str, Any]: UpperCamelCase = test.split("""::""" )[0] if test.startswith("""tests/models/""" ): UpperCamelCase = test.split("""/""" )[2] else: UpperCamelCase = None return test def lowercase__ ( __UpperCamelCase , __UpperCamelCase=None )-> Optional[Any]: UpperCamelCase = [(x[0], x[1], get_model(x[2] )) for x in logs] UpperCamelCase = [x for x in logs if x[2] is not None] UpperCamelCase = {x[2] for x in logs} UpperCamelCase = {} for test in tests: UpperCamelCase = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) UpperCamelCase = counter.most_common() UpperCamelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} UpperCamelCase = sum(error_counts.values() ) if n_errors > 0: UpperCamelCase = {"""count""": n_errors, """errors""": error_counts} UpperCamelCase = dict(sorted(r.items() , key=lambda __UpperCamelCase : item[1]["count"] , reverse=__UpperCamelCase ) ) return r def lowercase__ ( __UpperCamelCase )-> Optional[int]: UpperCamelCase = """| no. | error | status |""" UpperCamelCase = """|-:|:-|:-|""" UpperCamelCase = [header, sep] for error in reduced_by_error: UpperCamelCase = reduced_by_error[error]["""count"""] UpperCamelCase = F"| {count} | {error[:100]} | |" lines.append(__UpperCamelCase ) return "\n".join(__UpperCamelCase ) def lowercase__ ( __UpperCamelCase )-> Union[str, Any]: UpperCamelCase = """| model | no. 
of errors | major error | count |""" UpperCamelCase = """|-:|-:|-:|-:|""" UpperCamelCase = [header, sep] for model in reduced_by_model: UpperCamelCase = reduced_by_model[model]["""count"""] UpperCamelCase ,UpperCamelCase = list(reduced_by_model[model]["""errors"""].items() )[0] UpperCamelCase = F"| {model} | {count} | {error[:60]} | {_count} |" lines.append(__UpperCamelCase ) return "\n".join(__UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') SCREAMING_SNAKE_CASE__ = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) SCREAMING_SNAKE_CASE__ = get_job_links(args.workflow_run_id, token=args.token) SCREAMING_SNAKE_CASE__ = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: SCREAMING_SNAKE_CASE__ = k.find(' / ') SCREAMING_SNAKE_CASE__ = k[index + len(' / ') :] SCREAMING_SNAKE_CASE__ = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) SCREAMING_SNAKE_CASE__ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) SCREAMING_SNAKE_CASE__ = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error SCREAMING_SNAKE_CASE__ = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors SCREAMING_SNAKE_CASE__ = counter.most_common(3_0) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) SCREAMING_SNAKE_CASE__ = reduce_by_error(errors) SCREAMING_SNAKE_CASE__ = reduce_by_model(errors) SCREAMING_SNAKE_CASE__ = make_github_table(reduced_by_error) SCREAMING_SNAKE_CASE__ = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
321
'''simple docstring'''


def least_divisible_repunit(divisor: int) -> int:
    """Return the digit count of the shortest repunit 11...1 divisible by divisor (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor whose shortest repunit has more than `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f'{solution() = }')
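# Small checks for least_divisible_repunit above: 111111 = 7 * 15873, so the
# shortest repunit divisible by 7 has six digits, while even divisors and
# multiples of 5 can never divide a repunit (repunits always end in 1).
assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(2) == 0 and least_divisible_repunit(5) == 0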
321
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a_ ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Any: """simple docstring""" UpperCamelCase = size if size is not None else {"""height""": 18, """width""": 18} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def A__ ( self ) -> Dict: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a_ ( lowerCamelCase , unittest.TestCase ): lowercase = ViTImageProcessor if is_vision_available() else None def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = EfficientFormerImageProcessorTester(self ) @property def A__ ( self ) -> int: """simple docstring""" return self.image_proc_tester.prepare_image_processor_dict() def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) ) def A__ ( self ) -> Optional[int]: """simple docstring""" pass def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input UpperCamelCase = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def A__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_proc_tester , 
equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input UpperCamelCase = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input UpperCamelCase = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
321
'''simple docstring''' from __future__ import annotations from math import pow, sqrt def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> dict[str, float]: if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance == 0: return {"resistance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(__UpperCamelCase , 2 ) + pow(__UpperCamelCase , 2 ) )} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
321
1
'''simple docstring''' def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[Any]: global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: UpperCamelCase = mf_knapsack(i - 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: UpperCamelCase = max( mf_knapsack(i - 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , mf_knapsack(i - 1 , __UpperCamelCase , __UpperCamelCase , j - wt[i - 1] ) + val[i - 1] , ) UpperCamelCase = val return f[i][j] def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]: UpperCamelCase = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: UpperCamelCase = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: UpperCamelCase = dp[i - 1][w_] return dp[n][w_], dp def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]: if not (isinstance(__UpperCamelCase , (list, tuple) ) and isinstance(__UpperCamelCase , (list, tuple) )): raise ValueError( """Both the weights and values vectors must be either lists or tuples""" ) UpperCamelCase = len(__UpperCamelCase ) if num_items != len(__UpperCamelCase ): UpperCamelCase = ( """The number of weights must be the same as the number of values.\n""" F"But got {num_items} weights and {len(__UpperCamelCase )} values" ) raise ValueError(__UpperCamelCase ) for i in range(__UpperCamelCase ): if not isinstance(wt[i] , __UpperCamelCase ): UpperCamelCase = ( """All weights must be integers but got weight of """ F"type {type(wt[i] )} at index {i}" ) raise TypeError(__UpperCamelCase ) UpperCamelCase ,UpperCamelCase = knapsack(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) UpperCamelCase = set() _construct_solution(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return optimal_val, example_optional_set def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]: # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(__UpperCamelCase , __UpperCamelCase , i - 1 , __UpperCamelCase , __UpperCamelCase ) else: optimal_set.add(__UpperCamelCase ) _construct_solution(__UpperCamelCase , __UpperCamelCase , i - 1 , j - wt[i - 1] , __UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = [3, 2, 4, 4] SCREAMING_SNAKE_CASE__ = [4, 3, 2, 3] SCREAMING_SNAKE_CASE__ = 4 SCREAMING_SNAKE_CASE__ = 6 SCREAMING_SNAKE_CASE__ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('optimal_value = ', optimal_solution) print('An optimal subset corresponding to the optimal value', optimal_subset)
321
'''simple docstring''' # Algorithm for the pigeonhole sorting def lowercase__ ( __UpperCamelCase )-> Union[str, Any]: UpperCamelCase = min(__UpperCamelCase ) # min() finds the minimum value UpperCamelCase = max(__UpperCamelCase ) # max() finds the maximum value UpperCamelCase = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size UpperCamelCase = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. UpperCamelCase = 0 for count in range(__UpperCamelCase ): while holes[count] > 0: holes[count] -= 1 UpperCamelCase = count + min_val i += 1 def lowercase__ ( )-> Any: UpperCamelCase = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(__UpperCamelCase ) print("""Sorted order is:""" , """ """.join(map(str , __UpperCamelCase ) ) ) if __name__ == "__main__": main()
321
1
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class a_ ( lowerCamelCase ): lowercase = (DDPMParallelScheduler,) def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = { """num_train_timesteps""": 1000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**_SCREAMING_SNAKE_CASE ) return config def A__ ( self ) -> List[str]: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> str: """simple docstring""" self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , ) def A__ ( self ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def A__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = self.dummy_sample_deter + 0.1 UpperCamelCase = self.dummy_sample_deter - 0.1 UpperCamelCase = samplea.shape[0] UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE ) UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert 
abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ): if i == len(_SCREAMING_SNAKE_CASE ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE ) UpperCamelCase = prev_t.item() self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , 
timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _SCREAMING_SNAKE_CASE , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
321
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class a_ ( lowerCamelCase ): lowercase = (DDPMParallelScheduler,) def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = { """num_train_timesteps""": 1000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**_SCREAMING_SNAKE_CASE ) return config def A__ ( self ) -> List[str]: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> str: """simple docstring""" self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , ) def A__ ( self ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def A__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = self.dummy_sample_deter + 0.1 UpperCamelCase = self.dummy_sample_deter - 0.1 UpperCamelCase = samplea.shape[0] UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE ) UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert 
abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ): if i == len(_SCREAMING_SNAKE_CASE ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE ) UpperCamelCase = prev_t.item() self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , 
timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _SCREAMING_SNAKE_CASE , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
321
1
'''simple docstring''' import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class a_ ( lowerCamelCase ): lowercase = """M-CLIP""" def __init__( self , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=768 , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = transformerDimSize UpperCamelCase = imageDimSize super().__init__(**_SCREAMING_SNAKE_CASE ) class a_ ( lowerCamelCase ): lowercase = MCLIPConfig def __init__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) UpperCamelCase = XLMRobertaModel(_SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.transformer(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0] UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(_SCREAMING_SNAKE_CASE ), embs
321
'''simple docstring''' from __future__ import annotations import math class a_ : def __init__( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" UpperCamelCase = size # approximate the overall size of segment tree with given value UpperCamelCase = [0 for i in range(0 , 4 * size )] # create array to store lazy update UpperCamelCase = [0 for i in range(0 , 4 * size )] UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return idx * 2 def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return idx * 2 + 1 def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" if left_element == right_element: UpperCamelCase = a[left_element - 1] else: UpperCamelCase = (left_element + right_element) // 2 self.build(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.build(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = max( self.segment_tree[self.left(_SCREAMING_SNAKE_CASE )] , self.segment_tree[self.right(_SCREAMING_SNAKE_CASE )] ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" if self.flag[idx] is True: UpperCamelCase = self.lazy[idx] UpperCamelCase = False if left_element != right_element: UpperCamelCase = self.lazy[idx] UpperCamelCase = self.lazy[idx] UpperCamelCase = True UpperCamelCase = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: UpperCamelCase = val if left_element != right_element: UpperCamelCase = val UpperCamelCase = val UpperCamelCase = True UpperCamelCase = True return True UpperCamelCase = (left_element + right_element) // 2 self.update(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.update(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = max( self.segment_tree[self.left(_SCREAMING_SNAKE_CASE )] , self.segment_tree[self.right(_SCREAMING_SNAKE_CASE )] ) return True def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int | float: """simple docstring""" if self.flag[idx] is True: UpperCamelCase = self.lazy[idx] UpperCamelCase = False if left_element != right_element: UpperCamelCase = self.lazy[idx] UpperCamelCase = self.lazy[idx] UpperCamelCase = True UpperCamelCase = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] UpperCamelCase = (left_element + right_element) // 2 UpperCamelCase = self.query(self.left(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = self.query(self.right(_SCREAMING_SNAKE_CASE ) , mid + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __str__( self ) -> str: """simple docstring""" return str([self.query(1 , 1 , self.size , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) 
for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] SCREAMING_SNAKE_CASE__ = 1_5 SCREAMING_SNAKE_CASE__ = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
321
1
'''simple docstring''' import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = 'ybelkada/fonts' def lowercase__ ( )-> int: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use " """Pix2StructImageProcessor. Please upgrade torch.""" ) def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[Any]: requires_backends(__UpperCamelCase , ["""torch"""] ) _check_torch_version() UpperCamelCase = image_tensor.unsqueeze(0 ) UpperCamelCase = torch.nn.functional.unfold(__UpperCamelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) UpperCamelCase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , __UpperCamelCase , __UpperCamelCase , -1 ) UpperCamelCase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def lowercase__ ( __UpperCamelCase , __UpperCamelCase = 36 , __UpperCamelCase = "black" , __UpperCamelCase = "white" , __UpperCamelCase = 5 , __UpperCamelCase = 5 , __UpperCamelCase = 5 , __UpperCamelCase = 5 , __UpperCamelCase = None , __UpperCamelCase = None , )-> Image.Image: requires_backends(__UpperCamelCase , """vision""" ) # Add new lines so that each line is no more than 80 characters. UpperCamelCase = textwrap.TextWrapper(width=80 ) UpperCamelCase = wrapper.wrap(text=__UpperCamelCase ) UpperCamelCase = """\n""".join(__UpperCamelCase ) if font_bytes is not None and font_path is None: UpperCamelCase = io.BytesIO(__UpperCamelCase ) elif font_path is not None: UpperCamelCase = font_path else: UpperCamelCase = hf_hub_download(__UpperCamelCase , """Arial.TTF""" ) UpperCamelCase = ImageFont.truetype(__UpperCamelCase , encoding="""UTF-8""" , size=__UpperCamelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. UpperCamelCase = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , __UpperCamelCase ) ) UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = temp_draw.textbbox((0, 0) , __UpperCamelCase , __UpperCamelCase ) # Create the actual image with a bit of padding around the text. 
UpperCamelCase = text_width + left_padding + right_padding UpperCamelCase = text_height + top_padding + bottom_padding UpperCamelCase = Image.new("""RGB""" , (image_width, image_height) , __UpperCamelCase ) UpperCamelCase = ImageDraw.Draw(__UpperCamelCase ) draw.text(xy=(left_padding, top_padding) , text=__UpperCamelCase , fill=__UpperCamelCase , font=__UpperCamelCase ) return image def lowercase__ ( __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> List[str]: requires_backends(__UpperCamelCase , """vision""" ) # Convert to PIL image if necessary UpperCamelCase = to_pil_image(__UpperCamelCase ) UpperCamelCase = render_text(__UpperCamelCase , **__UpperCamelCase ) UpperCamelCase = max(header_image.width , image.width ) UpperCamelCase = int(image.height * (new_width / image.width) ) UpperCamelCase = int(header_image.height * (new_width / header_image.width) ) UpperCamelCase = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary UpperCamelCase = to_numpy_array(__UpperCamelCase ) if infer_channel_dimension_format(__UpperCamelCase ) == ChannelDimension.LAST: UpperCamelCase = to_channel_dimension_format(__UpperCamelCase , ChannelDimension.LAST ) return new_image class a_ ( lowerCamelCase ): lowercase = ["""flattened_patches"""] def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 2048 , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} UpperCamelCase = do_normalize UpperCamelCase = do_convert_rgb UpperCamelCase = max_patches UpperCamelCase = is_vqa def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> np.ndarray: """simple docstring""" requires_backends(self.extract_flattened_patches , """torch""" ) _check_torch_version() # convert to torch UpperCamelCase = to_channel_dimension_format(_SCREAMING_SNAKE_CASE , ChannelDimension.FIRST ) UpperCamelCase = torch.from_numpy(_SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase = patch_size["""height"""], patch_size["""width"""] UpperCamelCase ,UpperCamelCase = get_image_size(_SCREAMING_SNAKE_CASE ) # maximize scale s.t. 
UpperCamelCase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) UpperCamelCase = max(min(math.floor(scale * image_height / patch_height ) , _SCREAMING_SNAKE_CASE ) , 1 ) UpperCamelCase = max(min(math.floor(scale * image_width / patch_width ) , _SCREAMING_SNAKE_CASE ) , 1 ) UpperCamelCase = max(num_feasible_rows * patch_height , 1 ) UpperCamelCase = max(num_feasible_cols * patch_width , 1 ) UpperCamelCase = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=_SCREAMING_SNAKE_CASE , antialias=_SCREAMING_SNAKE_CASE , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] UpperCamelCase = torch_extract_patches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = patches.shape UpperCamelCase = patches_shape[1] UpperCamelCase = patches_shape[2] UpperCamelCase = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] UpperCamelCase = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , _SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] ) UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(_SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] UpperCamelCase = row_ids.to(torch.floataa ) UpperCamelCase = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] UpperCamelCase = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] UpperCamelCase = torch.nn.functional.pad(_SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float() UpperCamelCase = to_numpy_array(_SCREAMING_SNAKE_CASE ) return result def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE ) -> np.ndarray: """simple docstring""" if image.dtype == np.uinta: UpperCamelCase = image.astype(np.floataa ) # take mean across the whole `image` UpperCamelCase = np.mean(_SCREAMING_SNAKE_CASE ) UpperCamelCase = np.std(_SCREAMING_SNAKE_CASE ) UpperCamelCase = max(_SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ) -> ImageInput: """simple docstring""" UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCamelCase = patch_size if patch_size is not None else self.patch_size UpperCamelCase = max_patches if max_patches is not None else self.max_patches UpperCamelCase = self.is_vqa if kwargs.get("""data_format""" , _SCREAMING_SNAKE_CASE ) is not None: raise ValueError("""data_format is not an accepted input as the outputs are """ ) UpperCamelCase = make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( """Invalid image 
type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCamelCase = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images] # All transformations expect numpy arrays. UpperCamelCase = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images] if is_vqa: if header_text is None: raise ValueError("""A header text must be provided for VQA models.""" ) UpperCamelCase = kwargs.pop("""font_bytes""" , _SCREAMING_SNAKE_CASE ) UpperCamelCase = kwargs.pop("""font_path""" , _SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCamelCase = [header_text] * len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = [ render_header(_SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=_SCREAMING_SNAKE_CASE , font_path=_SCREAMING_SNAKE_CASE ) for i, image in enumerate(_SCREAMING_SNAKE_CASE ) ] if do_normalize: UpperCamelCase = [self.normalize(image=_SCREAMING_SNAKE_CASE ) for image in images] # convert to torch tensor and permute UpperCamelCase = [ self.extract_flattened_patches(image=_SCREAMING_SNAKE_CASE , max_patches=_SCREAMING_SNAKE_CASE , patch_size=_SCREAMING_SNAKE_CASE ) for image in images ] # create attention mask in numpy UpperCamelCase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] UpperCamelCase = BatchFeature( data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=_SCREAMING_SNAKE_CASE ) return encoded_outputs
321
'''simple docstring''' def lowercase__ ( __UpperCamelCase = 1000 )-> int: UpperCamelCase = -1 UpperCamelCase = 0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c UpperCamelCase = (n * n - 2 * a * n) // (2 * n - 2 * a) UpperCamelCase = n - a - b if c * c == (a * a + b * b): UpperCamelCase = a * b * c if candidate >= product: UpperCamelCase = candidate return product if __name__ == "__main__": print(f'{solution() = }')
321
1
'''simple docstring''' def lowercase__ ( __UpperCamelCase )-> int: if divisor % 5 == 0 or divisor % 2 == 0: return 0 UpperCamelCase = 1 UpperCamelCase = 1 while repunit: UpperCamelCase = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def lowercase__ ( __UpperCamelCase = 1000000 )-> int: UpperCamelCase = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(__UpperCamelCase ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(f'{solution() = }')
321
'''simple docstring''' import argparse import struct import unittest class a_ : def __init__( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" UpperCamelCase = data # Initialize hash values UpperCamelCase = [ 0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A, 0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19, ] # Initialize round constants UpperCamelCase = [ 0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5, 0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5, 0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3, 0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74, 0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC, 0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA, 0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7, 0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67, 0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13, 0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85, 0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3, 0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70, 0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5, 0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3, 0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08, 0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2, ] UpperCamelCase = self.preprocessing(self.data ) self.final_hash() @staticmethod def A__ ( _SCREAMING_SNAKE_CASE ) -> bytes: """simple docstring""" UpperCamelCase = B"""\x80""" + (B"""\x00""" * (63 - (len(_SCREAMING_SNAKE_CASE ) + 8) % 64)) UpperCamelCase = struct.pack(""">Q""" , (len(_SCREAMING_SNAKE_CASE ) * 8) ) return data + padding + big_endian_integer def A__ ( self ) -> None: """simple docstring""" UpperCamelCase = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCamelCase = list(struct.unpack(""">16L""" , _SCREAMING_SNAKE_CASE ) ) # add 48 0-ed integers words += [0] * 48 UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array UpperCamelCase = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) UpperCamelCase = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) UpperCamelCase = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression UpperCamelCase = self.ror(_SCREAMING_SNAKE_CASE , 6 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 11 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 25 ) UpperCamelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g) UpperCamelCase = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 UpperCamelCase = self.ror(_SCREAMING_SNAKE_CASE , 2 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 13 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 22 ) UpperCamelCase = (a & b) ^ (a & c) ^ (b & c) UpperCamelCase = (sa + maj) % 0x1_00_00_00_00 UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) UpperCamelCase = [a, b, c, d, e, f, g, h] # Modify final values UpperCamelCase = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] UpperCamelCase = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations) class a_ ( unittest.TestCase ): def A__ ( self ) -> None: """simple docstring""" import hashlib UpperCamelCase = bytes("""Test String""" , """utf-8""" ) self.assertEqual(SHAaaa(_SCREAMING_SNAKE_CASE ).hash , hashlib.shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() ) def lowercase__ ( )-> None: import doctest doctest.testmod() UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: UpperCamelCase = f.read() else: UpperCamelCase = bytes(__UpperCamelCase , """utf-8""" ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
321
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE__ = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwinForImageClassification', 'SwinForMaskedImageModeling', 'SwinModel', 'SwinPreTrainedModel', 'SwinBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSwinForImageClassification', 'TFSwinForMaskedImageModeling', 'TFSwinModel', 'TFSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
321
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) SCREAMING_SNAKE_CASE__ = _symbol_database.Default() SCREAMING_SNAKE_CASE__ = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) SCREAMING_SNAKE_CASE__ = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" SCREAMING_SNAKE_CASE__ = 4_5 SCREAMING_SNAKE_CASE__ = 1_5_8_1 SCREAMING_SNAKE_CASE__ = 1_5_1_7 SCREAMING_SNAKE_CASE__ = 1_5_7_0 SCREAMING_SNAKE_CASE__ = 1_5_8_4 SCREAMING_SNAKE_CASE__ = 1_7_9_3 SCREAMING_SNAKE_CASE__ = 1_7_9_5 SCREAMING_SNAKE_CASE__ = 1_9_1_6 SCREAMING_SNAKE_CASE__ = 1_8_6_4 SCREAMING_SNAKE_CASE__ = 1_9_0_5 SCREAMING_SNAKE_CASE__ = 1_9_1_9 SCREAMING_SNAKE_CASE__ = 2_4_2_9 SCREAMING_SNAKE_CASE__ = 2_2_0_8 SCREAMING_SNAKE_CASE__ = 2_4_1_8 SCREAMING_SNAKE_CASE__ = 2_3_2_3 SCREAMING_SNAKE_CASE__ = 2_4_0_7 # @@protoc_insertion_point(module_scope)
321
1
'''simple docstring''' def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[Any]: UpperCamelCase = """""" for i in table: res += inp[i - 1] return res def lowercase__ ( __UpperCamelCase )-> List[str]: return data[1:] + data[0] def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[int]: UpperCamelCase = """""" for i in range(len(__UpperCamelCase ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Tuple: UpperCamelCase = int("""0b""" + data[0] + data[-1] , 2 ) UpperCamelCase = int("""0b""" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple: UpperCamelCase = message[:4] UpperCamelCase = message[4:] UpperCamelCase = apply_table(__UpperCamelCase , __UpperCamelCase ) UpperCamelCase = xor(__UpperCamelCase , __UpperCamelCase ) UpperCamelCase = apply_sbox(__UpperCamelCase , temp[:4] ) # noqa: E741 UpperCamelCase = apply_sbox(__UpperCamelCase , temp[4:] ) UpperCamelCase = """0""" * (2 - len(__UpperCamelCase )) + l # noqa: E741 UpperCamelCase = """0""" * (2 - len(__UpperCamelCase )) + r UpperCamelCase = apply_table(l + r , __UpperCamelCase ) UpperCamelCase = xor(__UpperCamelCase , __UpperCamelCase ) return temp + right if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = input('Enter 10 bit key: ') SCREAMING_SNAKE_CASE__ = input('Enter 8 bit message: ') SCREAMING_SNAKE_CASE__ = [6, 3, 7, 4, 8, 5, 1_0, 9] SCREAMING_SNAKE_CASE__ = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6] SCREAMING_SNAKE_CASE__ = [2, 4, 3, 1] SCREAMING_SNAKE_CASE__ = [2, 6, 3, 1, 4, 8, 5, 7] SCREAMING_SNAKE_CASE__ = [4, 1, 3, 5, 7, 2, 8, 6] SCREAMING_SNAKE_CASE__ = [4, 1, 2, 3, 2, 3, 4, 1] SCREAMING_SNAKE_CASE__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] SCREAMING_SNAKE_CASE__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation SCREAMING_SNAKE_CASE__ = apply_table(key, paa_table) SCREAMING_SNAKE_CASE__ = temp[:5] SCREAMING_SNAKE_CASE__ = temp[5:] SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table) SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table) # encryption SCREAMING_SNAKE_CASE__ = apply_table(message, IP) SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4] SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv) print('Cipher text is:', CT) # decryption SCREAMING_SNAKE_CASE__ = apply_table(CT, IP) SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4] SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv) print('Plain text after decrypting is:', PT)
321
'''simple docstring''' SCREAMING_SNAKE_CASE__ = 8.31_44_62 # Unit - J mol-1 K-1 def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float: if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float: if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
321
1
'''simple docstring''' import datasets from .evaluate import evaluate SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def A__ ( self ) -> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": { """id""": datasets.Value("""string""" ), """prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ), }, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , ) def A__ ( self 
, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} UpperCamelCase = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE ) return score
321
'''simple docstring''' import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE__ = importlib.util.find_spec('s3fs') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( __UpperCamelCase )-> str: if "://" in dataset_path: UpperCamelCase = dataset_path.split("""://""" )[1] return dataset_path def lowercase__ ( __UpperCamelCase )-> bool: if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int: UpperCamelCase = not is_remote_filesystem(__UpperCamelCase ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__UpperCamelCase ) , fs._strip_protocol(__UpperCamelCase ) ) else: fs.mv(__UpperCamelCase , __UpperCamelCase , recursive=__UpperCamelCase ) def lowercase__ ( )-> None: if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: UpperCamelCase = None UpperCamelCase = None UpperCamelCase = threading.Lock()
321
1
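The filesystem helpers in the context snippet above strip the protocol prefix from a dataset path and decide between a local `shutil.move` and a remote `fs.mv`. A minimal, dependency-free sketch of that logic follows; the names `extract_path_from_uri` and `is_remote_filesystem` are assumed for readability, since the dump renames every function to `lowercase__`:

```python
def extract_path_from_uri(dataset_path: str) -> str:
    # "s3://bucket/key" -> "bucket/key"; plain local paths pass through unchanged
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    # anything other than the local "file" protocol counts as remote
    return fs is not None and fs.protocol != "file"


class FakeS3FileSystem:
    # hypothetical stand-in for an fsspec filesystem object
    protocol = "s3"


assert extract_path_from_uri("s3://bucket/data/train") == "bucket/data/train"
assert extract_path_from_uri("/tmp/data/train") == "/tmp/data/train"
assert is_remote_filesystem(FakeS3FileSystem()) is True
assert is_remote_filesystem(None) is False
```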
'''simple docstring''' import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class a_ ( lowerCamelCase ): lowercase = ["""image_processor""", """tokenizer"""] lowercase = """BlipImageProcessor""" lowercase = """AutoTokenizer""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # add QFormer tokenizer UpperCamelCase = qformer_tokenizer def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) UpperCamelCase = BatchFeature() if text is not None: UpperCamelCase = self.tokenizer( text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) encoding.update(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.qformer_tokenizer( text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCamelCase = qformer_text_encoding.pop("""input_ids""" ) UpperCamelCase = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: UpperCamelCase = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) encoding.update(_SCREAMING_SNAKE_CASE ) return encoding def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , 
**_SCREAMING_SNAKE_CASE ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.tokenizer.model_input_names UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def A__ ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" if os.path.isfile(_SCREAMING_SNAKE_CASE ): raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) UpperCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , """qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) return super().save_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def A__ ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" UpperCamelCase = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder="""qformer_tokenizer""" ) UpperCamelCase = cls._get_arguments_from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) args.append(_SCREAMING_SNAKE_CASE ) return cls(*_SCREAMING_SNAKE_CASE )
321
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = { 'configuration_xlm_roberta_xl': [ 'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaXLConfig', 'XLMRobertaXLOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaXLForCausalLM', 'XLMRobertaXLForMaskedLM', 'XLMRobertaXLForMultipleChoice', 'XLMRobertaXLForQuestionAnswering', 'XLMRobertaXLForSequenceClassification', 'XLMRobertaXLForTokenClassification', 'XLMRobertaXLModel', 'XLMRobertaXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
321
1
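The `model_input_names` property of the InstructBLIP-style processor above merges the tokenizer's and the image processor's input names with `list(dict.fromkeys(...))`, which deduplicates while preserving first-seen order. A tiny runnable illustration of the idiom:

```python
tokenizer_input_names = ["input_ids", "attention_mask"]
image_processor_input_names = ["pixel_values", "attention_mask"]

# dict keys are unique and insertion-ordered (Python 3.7+), so this drops
# the duplicate "attention_mask" while keeping the original order
merged = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

assert merged == ["input_ids", "attention_mask", "pixel_values"]
```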
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class a_ ( unittest.TestCase ): @slow def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) UpperCamelCase = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] ) # The dog is cute and lives in the garden house UpperCamelCase = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim UpperCamelCase = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCamelCase = model(_SCREAMING_SNAKE_CASE )["""last_hidden_state"""].detach() self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) @slow def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) UpperCamelCase = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] ) # The dog is cute and lives in the garden house UpperCamelCase = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim UpperCamelCase = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCamelCase = model(_SCREAMING_SNAKE_CASE )["""last_hidden_state"""].detach() self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
321
'''simple docstring''' import argparse from collections import defaultdict import yaml SCREAMING_SNAKE_CASE__ = 'docs/source/en/_toctree.yml' def lowercase__ ( __UpperCamelCase )-> Optional[Any]: UpperCamelCase = defaultdict(__UpperCamelCase ) UpperCamelCase = [] UpperCamelCase = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__UpperCamelCase ) UpperCamelCase = new_doc_list UpperCamelCase = [key for key, value in counts.items() if value > 1] UpperCamelCase = [] for duplicate_key in duplicates: UpperCamelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__UpperCamelCase ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add non-duplicate keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) UpperCamelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__UpperCamelCase ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__UpperCamelCase ) # Sort return overview_doc def lowercase__ ( __UpperCamelCase=False )-> List[str]: with open(__UpperCamelCase , encoding="""utf-8""" ) as f: UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCamelCase = content[api_idx]["""sections"""] # Then to the scheduler doc UpperCamelCase = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 UpperCamelCase = api_doc[scheduler_idx]["""sections"""] UpperCamelCase = clean_doc_toc(__UpperCamelCase ) UpperCamelCase = False if new_scheduler_doc != scheduler_doc: UpperCamelCase = True if overwrite: UpperCamelCase = new_scheduler_doc if diff: if overwrite: UpperCamelCase = api_doc with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( """The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def lowercase__ ( __UpperCamelCase=False )-> Tuple: with open(__UpperCamelCase , encoding="""utf-8""" ) as f: UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCamelCase = content[api_idx]["""sections"""] # Then to the pipeline doc UpperCamelCase = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 UpperCamelCase = False UpperCamelCase = api_doc[pipeline_idx]["""sections"""] UpperCamelCase = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: UpperCamelCase = pipeline_doc["""section"""] UpperCamelCase = clean_doc_toc(__UpperCamelCase ) if overwrite: UpperCamelCase = new_sub_pipeline_doc new_pipeline_docs.append(__UpperCamelCase ) # sort overall pipeline doc UpperCamelCase = clean_doc_toc(__UpperCamelCase ) if new_pipeline_docs != pipeline_docs: UpperCamelCase = True if overwrite: UpperCamelCase = new_pipeline_docs if diff: if overwrite: 
UpperCamelCase = api_doc with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( """The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') SCREAMING_SNAKE_CASE__ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
321
1
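The toctree utility above deduplicates entries by their `local` key, keeps any "Overview" page first, and sorts the rest by title. A reduced sketch of the dedup-and-sort core on a toy list; the name `clean_doc_toc` is taken from the call sites in the snippet, and overview handling plus the duplicate-title check are omitted:

```python
from collections import defaultdict


def clean_doc_toc(doc_list):
    # count how often each "local" key appears
    counts = defaultdict(int)
    for doc in doc_list:
        counts[doc["local"]] += 1
    new_doc = []
    # keep exactly one entry per duplicated key ...
    for key, value in counts.items():
        if value > 1:
            title = next(d["title"] for d in doc_list if d["local"] == key)
            new_doc.append({"local": key, "title": title})
    # ... plus every entry that was already unique
    new_doc.extend(d for d in doc_list if counts[d["local"]] == 1)
    return sorted(new_doc, key=lambda s: s["title"].lower())


docs = [
    {"local": "zeta", "title": "Zeta"},
    {"local": "zeta", "title": "Zeta"},
    {"local": "alpha", "title": "Alpha"},
]
assert clean_doc_toc(docs) == [
    {"local": "alpha", "title": "Alpha"},
    {"local": "zeta", "title": "Zeta"},
]
```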
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '▁' SCREAMING_SNAKE_CASE__ = {'vocab_file': 'sentencepiece.bpe.model'} SCREAMING_SNAKE_CASE__ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model' ), } } SCREAMING_SNAKE_CASE__ = { 'facebook/nllb-200-distilled-600M': 1_0_2_4, } # fmt: off SCREAMING_SNAKE_CASE__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class a_ ( lowerCamelCase ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = ["""input_ids""", """attention_mask"""] lowercase = [] lowercase = [] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , 
_SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" UpperCamelCase = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs UpperCamelCase = legacy_behaviour super().__init__( bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase = 1 UpperCamelCase = len(self.sp_model ) UpperCamelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_SCREAMING_SNAKE_CASE ) } UpperCamelCase = {v: k for k, v in self.lang_code_to_id.items()} UpperCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} UpperCamelCase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) UpperCamelCase = src_lang if src_lang is not None else """eng_Latn""" UpperCamelCase = self.lang_code_to_id[self._src_lang] UpperCamelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.__dict__.copy() UpperCamelCase = None UpperCamelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCamelCase = {} UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def A__ ( self ) -> str: """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def A__ ( self ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def A__ ( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" UpperCamelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE ) UpperCamelCase = [1] * len(self.prefix_tokens ) UpperCamelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) UpperCamelCase = src_lang UpperCamelCase = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) UpperCamelCase = tgt_lang_id return inputs def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE ) def A__ ( 
self , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = """""".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return UpperCamelCase = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi: UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(_SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "eng_Latn" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "fra_Latn" , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding: """simple docstring""" UpperCamelCase = src_lang UpperCamelCase = tgt_lang return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Any: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A__ ( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" UpperCamelCase = self.lang_code_to_id[src_lang] if self.legacy_behaviour: UpperCamelCase = [] UpperCamelCase = [self.eos_token_id, self.cur_lang_code] else: UpperCamelCase = [self.cur_lang_code] UpperCamelCase = [self.eos_token_id] def A__ ( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" UpperCamelCase = self.lang_code_to_id[lang] if self.legacy_behaviour: UpperCamelCase = [] UpperCamelCase = [self.eos_token_id, self.cur_lang_code] else: UpperCamelCase = [self.cur_lang_code] UpperCamelCase = [self.eos_token_id]
321
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]: UpperCamelCase = 1.5 UpperCamelCase = int(factor * num_class_images ) UpperCamelCase = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 ) os.makedirs(F"{class_data_dir}/images" , exist_ok=__UpperCamelCase ) if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images: return while True: UpperCamelCase = client.query(text=__UpperCamelCase ) if len(__UpperCamelCase ) >= factor * num_class_images or num_images > 1E4: break else: UpperCamelCase = int(factor * num_images ) UpperCamelCase = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 , ) UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = tqdm(desc="""downloading real regularization images""" , total=__UpperCamelCase ) with open(F"{class_data_dir}/caption.txt" , """w""" ) as fa, open(F"{class_data_dir}/urls.txt" , """w""" ) as fa, open( F"{class_data_dir}/images.txt" , """w""" ) as fa: while total < num_class_images: UpperCamelCase = class_images[count] count += 1 try: UpperCamelCase = requests.get(images["""url"""] ) if img.status_code == 200: UpperCamelCase = Image.open(BytesIO(img.content ) ) with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowercase__ ( )-> str: UpperCamelCase = argparse.ArgumentParser("""""" , add_help=__UpperCamelCase ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__UpperCamelCase , type=__UpperCamelCase ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__UpperCamelCase , type=__UpperCamelCase ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=__UpperCamelCase ) return parser.parse_args() if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
321
1
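The NLLB-style tokenizer above realigns SentencePiece ids with the fairseq vocabulary: four special tokens get fixed ids 0-3 and every real piece is shifted by a `fairseq_offset` of 1, with spm id 0 falling back to the unk id. A self-contained sketch of that token-to-id arithmetic (the toy vocab is invented for illustration):

```python
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1
UNK_TOKEN_ID = FAIRSEQ_TOKENS_TO_IDS["<unk>"]


def convert_token_to_id(token: str, spm_piece_to_id: dict) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = spm_piece_to_id.get(token, 0)  # SentencePiece reserves id 0 for <unk>
    # a truthy spm id is shifted by the offset; id 0 falls back to the unk id
    return spm_id + FAIRSEQ_OFFSET if spm_id else UNK_TOKEN_ID


toy_spm_vocab = {"▁hello": 5, "▁world": 6}
assert convert_token_to_id("▁hello", toy_spm_vocab) == 6
assert convert_token_to_id("<pad>", toy_spm_vocab) == 1
assert convert_token_to_id("▁unseen", toy_spm_vocab) == 3
```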
'''simple docstring''' def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> bool: return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
321
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') @dataclass class a_ : lowercase = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} ) lowercase = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = {} if self.train_dir is not None: UpperCamelCase = self.train_dir if self.validation_dir is not None: UpperCamelCase = self.validation_dir UpperCamelCase = data_files if data_files else None @dataclass class a_ : lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) lowercase = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) lowercase = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class a_ ( lowerCamelCase ): lowercase = field( default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def lowercase__ ( __UpperCamelCase )-> int: UpperCamelCase = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def lowercase__ ( )-> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase = training_args.get_process_log_level() logger.setLevel(__UpperCamelCase ) transformers.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. 
UpperCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __UpperCamelCase ) and data_args.train_val_split > 0.0: UpperCamelCase = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase = split["""train"""] UpperCamelCase = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__UpperCamelCase ) elif model_args.model_name_or_path: UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: UpperCamelCase = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(F"New config: {config}" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__UpperCamelCase ) elif model_args.model_name_or_path: UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: UpperCamelCase = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase = ViTMAEForPreTraining(__UpperCamelCase ) if training_args.do_train: UpperCamelCase = ds["""train"""].column_names else: UpperCamelCase = ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase = data_args.image_column_name elif "image" in column_names: UpperCamelCase = """image""" elif "img" in column_names: 
UpperCamelCase = """img""" else: UpperCamelCase = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase = image_processor.size["""shortest_edge"""] else: UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase = Compose( [ Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__UpperCamelCase ): UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__UpperCamelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__UpperCamelCase ) # Compute absolute learning rate UpperCamelCase = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: UpperCamelCase = None if training_args.resume_from_checkpoint is not None: UpperCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase = last_checkpoint UpperCamelCase = trainer.train(resume_from_checkpoint=__UpperCamelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase = trainer.evaluate() trainer.log_metrics("""eval""" , __UpperCamelCase ) trainer.save_metrics("""eval""" , __UpperCamelCase ) # Write model card and (optionally) push to hub UpperCamelCase = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**__UpperCamelCase ) else: trainer.create_model_card(**__UpperCamelCase ) def lowercase__ ( __UpperCamelCase )-> List[str]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
321
1
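The one-liner above tests whether two integers have opposite signs: the sign bit of `a ^ b` is set exactly when the operands' sign bits differ. The dump's renaming collapses both parameters into the same identifier, which is not valid Python as printed, so here is a runnable reconstruction under distinct, assumed names:

```python
def different_signs(num_a: int, num_b: int) -> bool:
    # the XOR of two ints is negative iff exactly one operand is negative;
    # ^ binds tighter than <, so this is (num_a ^ num_b) < 0
    return num_a ^ num_b < 0


assert different_signs(3, -7) is True
assert different_signs(-3, -7) is False
assert different_signs(3, 7) is False
```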
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = '''timm_backbone''' def __init__( self : Optional[Any] , __UpperCAmelCase : Any=None , __UpperCAmelCase : Any=3 , __UpperCAmelCase : str=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Optional[int]=None , **__UpperCAmelCase : Dict , ) ->Tuple: """simple docstring""" super().__init__(**__UpperCAmelCase ) a = backbone a = num_channels a = features_only a = use_pretrained_backbone a = True a = out_indices if out_indices is not None else (-1,)
0
'''simple docstring''' import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE__ = 2_5_6 class a_ ( lowerCamelCase ): lowercase = ["""melgan"""] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__() # From MELGAN UpperCamelCase = math.log(1e-5 ) # Matches MelGAN training. UpperCamelCase = 4.0 # Largest value for most examples UpperCamelCase = 128 self.register_modules( notes_encoder=_SCREAMING_SNAKE_CASE , continuous_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , melgan=_SCREAMING_SNAKE_CASE , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" UpperCamelCase ,UpperCamelCase = output_range if clip: UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , self.min_value , self.max_value ) # Scale to [0, 1]. UpperCamelCase = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = input_range UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if clip else outputs # Scale to [0, 1]. UpperCamelCase = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" UpperCamelCase = input_tokens > 0 UpperCamelCase ,UpperCamelCase = self.notes_encoder( encoder_input_tokens=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase = self.continuous_encoder( encoder_inputs=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" UpperCamelCase = noise_time if not torch.is_tensor(_SCREAMING_SNAKE_CASE ): UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0: UpperCamelCase = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCamelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) UpperCamelCase = self.decoder( encodings_and_masks=_SCREAMING_SNAKE_CASE , decoder_input_tokens=_SCREAMING_SNAKE_CASE , decoder_noise_time=_SCREAMING_SNAKE_CASE ) return logits @torch.no_grad() def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "numpy" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , ) -> Union[AudioPipelineOutput, Tuple]: """simple docstring""" if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(_SCREAMING_SNAKE_CASE )}." ) UpperCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) UpperCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa ) UpperCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) for i, encoder_input_tokens in enumerate(_SCREAMING_SNAKE_CASE ): if i == 0: UpperCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. UpperCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
UpperCamelCase = ones UpperCamelCase = self.scale_features( _SCREAMING_SNAKE_CASE , output_range=[-1.0, 1.0] , clip=_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_SCREAMING_SNAKE_CASE , continuous_mask=_SCREAMING_SNAKE_CASE , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop UpperCamelCase = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCamelCase = self.decode( encodings_and_masks=_SCREAMING_SNAKE_CASE , input_tokens=_SCREAMING_SNAKE_CASE , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 UpperCamelCase = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = self.scale_to_features(_SCREAMING_SNAKE_CASE , input_range=[-1.0, 1.0] ) UpperCamelCase = mel[:1] UpperCamelCase = mel.cpu().float().numpy() UpperCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info("""Generated segment""" , _SCREAMING_SNAKE_CASE ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" ) elif output_type == "numpy" and self.melgan is None: raise ValueError( """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" ) if output_type == "numpy": UpperCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: UpperCamelCase = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_SCREAMING_SNAKE_CASE )
321
0
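`scale_features` in the spectrogram pipeline above linearly maps log-mel features from the model's fixed range `[log 1e-5, 4.0]` into a target interval, optionally clipping first; `scale_to_features` inverts the mapping. A small sketch of the same affine rescaling, using NumPy in place of torch to stay dependency-light:

```python
import math

import numpy as np

MIN_VALUE = math.log(1e-5)  # matches MelGAN training, as in the pipeline above
MAX_VALUE = 4.0


def scale_features(features, output_range=(-1.0, 1.0), clip=False):
    min_out, max_out = output_range
    if clip:
        features = np.clip(features, MIN_VALUE, MAX_VALUE)
    zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)  # scale to [0, 1]
    return zero_one * (max_out - min_out) + min_out  # scale to [min_out, max_out]


x = np.array([MIN_VALUE, MAX_VALUE])
assert np.allclose(scale_features(x), [-1.0, 1.0])
```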
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: Dict ={ 'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: Tuple =['AlbertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: str =['AlbertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: Optional[int] =[ 'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'AlbertForMaskedLM', 'AlbertForMultipleChoice', 'AlbertForPreTraining', 'AlbertForQuestionAnswering', 'AlbertForSequenceClassification', 'AlbertForTokenClassification', 'AlbertModel', 'AlbertPreTrainedModel', 'load_tf_weights_in_albert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: List[str] =[ 'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAlbertForMaskedLM', 'TFAlbertForMultipleChoice', 'TFAlbertForPreTraining', 'TFAlbertForQuestionAnswering', 'TFAlbertForSequenceClassification', 'TFAlbertForTokenClassification', 'TFAlbertMainLayer', 'TFAlbertModel', 'TFAlbertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: Optional[Any] =[ 'FlaxAlbertForMaskedLM', 'FlaxAlbertForMultipleChoice', 'FlaxAlbertForPreTraining', 'FlaxAlbertForQuestionAnswering', 'FlaxAlbertForSequenceClassification', 'FlaxAlbertForTokenClassification', 'FlaxAlbertModel', 'FlaxAlbertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, 
FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_: Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
'''simple docstring''' def lowercase__ ( __UpperCamelCase = 4000000 )-> int: UpperCamelCase = [] UpperCamelCase ,UpperCamelCase = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(__UpperCamelCase ) UpperCamelCase ,UpperCamelCase = b, a + b return sum(__UpperCamelCase ) if __name__ == "__main__": print(f'{solution() = }')
321
0
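The context snippet above solves Project Euler problem 2, summing the even Fibonacci numbers up to four million, by collecting them in a list and summing. A compact runnable equivalent that accumulates the total directly:

```python
def even_fib_sum(limit: int = 4_000_000) -> int:
    total, a, b = 0, 0, 1
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total


assert even_fib_sum(10) == 10  # 2 + 8
assert even_fib_sum(4_000_000) == 4_613_732  # the Project Euler answer
```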
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' lowercase__ = Image.open(requests.get(A , stream=A ).raw ).convert('''RGB''' ) return image def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" lowercase__ = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') ) # fmt: on return rename_keys def _SCREAMING_SNAKE_CASE (A , A , A ) -> Tuple: 
"""simple docstring""" lowercase__ = dct.pop(A ) lowercase__ = val def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowercase__ = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" ) lowercase__ = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict lowercase__ = torch.cat((q_bias, torch.zeros_like(A , requires_grad=A ), v_bias) ) lowercase__ = qkv_bias def _SCREAMING_SNAKE_CASE (A ) -> List[Any]: """simple docstring""" lowercase__ = 364 if '''coco''' in model_name else 224 lowercase__ = InstructBlipVisionConfig(image_size=A ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: lowercase__ = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowercase__ = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: lowercase__ = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: lowercase__ = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=32_001 ).to_dict() else: raise ValueError('''Model name not supported''' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 lowercase__ = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() lowercase__ = InstructBlipConfig(vision_config=A , text_config=A , qformer_config=A ) return config, image_size @torch.no_grad() def _SCREAMING_SNAKE_CASE (A , A=None , A=False ) -> Optional[Any]: """simple docstring""" lowercase__ = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' ) qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} ) if "t5" in model_name: lowercase__ = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) lowercase__ = LlamaTokenizerFast.from_pretrained( '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' ) tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} ) lowercase__ ,lowercase__ = get_blipa_config(A ) lowercase__ = InstructBlipForConditionalGeneration(A ).eval() lowercase__ = { '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''), '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''), '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''), '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''), } lowercase__ ,lowercase__ = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) lowercase__ = '''cuda:1''' if torch.cuda.is_available() else '''cpu''' lowercase__ = '''cuda:2''' if torch.cuda.is_available() else '''cpu''' lowercase__ ,lowercase__ ,lowercase__ = 
load_model_and_preprocess( name=A , model_type=A , is_eval=A , device=A ) original_model.eval() print('''Done!''' ) # update state dict keys lowercase__ = original_model.state_dict() lowercase__ = create_rename_keys(A ) for src, dest in rename_keys: rename_key(A , A , A ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowercase__ = state_dict.pop(A ) if key.startswith('''Qformer.bert''' ): lowercase__ = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: lowercase__ = key.replace('''self''' , '''attention''' ) if "llm_proj" in key: lowercase__ = key.replace('''llm_proj''' , '''language_projection''' ) if "t5_proj" in key: lowercase__ = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''llm_model''' ): lowercase__ = key.replace('''llm_model''' , '''language_model''' ) if key.startswith('''t5''' ): lowercase__ = key.replace('''t5''' , '''language''' ) lowercase__ = val # read in qv biases read_in_q_v_bias(A , A ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(A , strict=A ) lowercase__ = load_demo_image() lowercase__ = '''What is unusual about this image?''' # create processor lowercase__ = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=A , image_std=A ) lowercase__ = InstructBlipProcessor( image_processor=A , tokenizer=A , qformer_tokenizer=A , ) lowercase__ = processor(images=A , text=A , return_tensors='''pt''' ).to(A ) # make sure processor creates exact same pixel values lowercase__ = vis_processors['''eval'''](A ).unsqueeze(0 ).to(A ) lowercase__ = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A ) original_model.to(A ) hf_model.to(A ) with torch.no_grad(): if "vicuna" in model_name: lowercase__ = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits lowercase__ = hf_model(**A ).logits else: lowercase__ = original_model( {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits lowercase__ = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(A ) lowercase__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) lowercase__ = hf_model(**A , labels=A ).logits print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape lowercase__ = 1E-4 if '''vicuna''' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , A , atol=A ) print('''Looks ok!''' ) print('''Generating with original model...''' ) lowercase__ = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('''Generating with HF model...''' ) lowercase__ = hf_model.generate( **A , do_sample=A , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
lowercase__ = 2 print('''Original generation:''' , A ) lowercase__ = processor.batch_decode(A , skip_special_tokens=A ) lowercase__ = [text.strip() for text in output_text] print('''HF generation:''' , A ) if pytorch_dump_folder_path is not None: processor.save_pretrained(A ) hf_model.save_pretrained(A ) if push_to_hub: processor.push_to_hub(f"Salesforce/{model_name}" ) hf_model.push_to_hub(f"Salesforce/{model_name}" ) if __name__ == "__main__": lowerCamelCase : List[Any] = argparse.ArgumentParser() lowerCamelCase : Dict = [ 'instructblip-vicuna-7b', 'instructblip-vicuna-13b', 'instructblip-flan-t5-xl', 'instructblip-flan-t5-xxl', ] parser.add_argument( '--model_name', default='instructblip-flan-t5-xl', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) lowerCamelCase : Union[str, Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
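For reference, a hypothetical invocation of the InstructBLIP converter above. The entry-point name `convert_blipa_checkpoint` comes from the script's own __main__ block (the def names themselves are obfuscated in this dump), the arguments are positional to match the obfuscated signature, and the output path is a placeholder:

# Hypothetical call; model choice and paths are illustrative only.
convert_blipa_checkpoint("instructblip-flan-t5-xl", "./instructblip-flan-t5-xl", False)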
2
'''simple docstring'''


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid for a vertex if no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
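A minimal usage sketch for the backtracking m-coloring helpers above; the adjacency matrix and the choice of three colors are illustrative, not taken from the source:

graph = [
    [0, 1, 1, 0, 0],
    [1, 0, 1, 0, 0],
    [1, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 0, 0, 1, 0],
]
# Each entry of the result is the color index assigned to that vertex,
# or [] when no valid coloring with max_colors colors exists.
print(color(graph, 3))  # [0, 1, 2, 0, 1]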
321
0
'''simple docstring'''


def count_inversions_bf(arr):
    # Brute force: check every pair (i, j) with i < j. O(n^2).
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    # Divide and conquer: sort while counting inversions. O(n log n).
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = 0
    num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
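A small, hypothetical check of the merge step above: with every element of the sorted left half larger than every element of the right half, each of the 2 x 2 pairs is a cross inversion.

merged, cross = _count_cross_inversions([3, 4], [1, 2])
print(merged, cross)  # [1, 2, 3, 4] 4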
3
'''simple docstring'''


def solution(n: int = 2000000) -> int:
    # Sieve of Eratosthenes: primality_list[i] == 0 means i is (still) prime.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):  # step by i, marking multiples of i
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
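A quick sanity check on small bounds (values chosen for illustration): the primes below 10 sum to 17, and the primes below 100 sum to 1060.

assert solution(10) == 17  # 2 + 3 + 5 + 7
assert solution(100) == 1060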
321
0
'''simple docstring'''


class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        # Sum of array[start..end], inclusive on both ends.
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        # A contiguous subarray sums to target_sum iff two prefix sums differ by it.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
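An illustrative use of the prefix-sum class above (the class and method names follow the reconstruction; the input list is made up):

prefix_sum = PrefixSum([1, 2, 3, 4])  # prefix sums: [1, 3, 6, 10]
print(prefix_sum.get_sum(1, 3))       # 9 -> 2 + 3 + 4
print(prefix_sum.contains_sum(6))     # True -> the subarray [1, 2, 3]
print(prefix_sum.contains_sum(100))   # False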
4
'''simple docstring'''
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears the lowest set bit
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
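A quick agreement check between the two counters (25 is 0b11001, so three set bits):

assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3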
321
0
from typing import Any def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> list: """simple docstring""" _validation( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # Creates data structures and fill initial step _lowercase ={} _lowercase ={} for state in states_space: _lowercase =observations_space[0] _lowercase =( initial_probabilities[state] * emission_probabilities[state][observation] ) _lowercase =None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__snake_case ) ): _lowercase =observations_space[o] _lowercase =observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function _lowercase ='''''' _lowercase =-1 for k_state in states_space: _lowercase =( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: _lowercase =probability _lowercase =k_state # Update probabilities and pointers dicts _lowercase =( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) _lowercase =arg_max # The final observation _lowercase =observations_space[len(__snake_case ) - 1] # argmax for given final observation _lowercase ='''''' _lowercase =-1 for k_state in states_space: _lowercase =probabilities[(k_state, final_observation)] if probability > max_probability: _lowercase =probability _lowercase =k_state _lowercase =arg_max # Process pointers backwards _lowercase =last_state _lowercase =[] for o in range(len(__snake_case ) - 1 , -1 , -1 ): result.append(__snake_case ) _lowercase =pointers[previous, observations_space[o]] result.reverse() return result def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None: """simple docstring""" _validate_not_empty( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) _validate_lists(__snake_case , __snake_case ) _validate_dicts( __snake_case , __snake_case , __snake_case ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None: """simple docstring""" if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None: """simple docstring""" _validate_list(__snake_case , '''observations_space''' ) _validate_list(__snake_case , '''states_space''' ) def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None: """simple docstring""" if not isinstance(_object , __snake_case ): _lowercase =F"{var_name} must be a list" raise ValueError(__snake_case ) else: for x in _object: if not isinstance(__snake_case , __snake_case ): _lowercase =F"{var_name} must be a list of strings" raise ValueError(__snake_case ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , ) -> None: """simple docstring""" _validate_dict(__snake_case , '''initial_probabilities''' , __snake_case ) _validate_nested_dict(__snake_case , '''transition_probabilities''' ) _validate_nested_dict(__snake_case , '''emission_probabilities''' ) def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None: """simple docstring""" _validate_dict(_object , __snake_case , __snake_case ) for x in _object.values(): 
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None: """simple docstring""" if not isinstance(_object , __snake_case ): _lowercase =F"{var_name} must be a dict" raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object ): _lowercase =F"{var_name} all keys must be strings" raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ): _lowercase ='''nested dictionary ''' if nested else '''''' _lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}" raise ValueError(__snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
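The Viterbi module above is heavily obfuscated (every helper is named `UpperCAmelCase_`, so later defs shadow earlier ones), which makes it awkward to call directly. The following self-contained sketch runs the same dynamic program on a toy two-state HMM; all state names and probabilities are illustrative, not from the source.

states = ("Rainy", "Sunny")
observations = ("walk", "shop", "clean")
start_p = {"Rainy": 0.6, "Sunny": 0.4}
trans_p = {"Rainy": {"Rainy": 0.7, "Sunny": 0.3}, "Sunny": {"Rainy": 0.4, "Sunny": 0.6}}
emit_p = {
    "Rainy": {"walk": 0.1, "shop": 0.4, "clean": 0.5},
    "Sunny": {"walk": 0.6, "shop": 0.3, "clean": 0.1},
}

# Initial step, then fill probabilities forward while remembering the best paths.
probs = {s: start_p[s] * emit_p[s][observations[0]] for s in states}
paths = {s: [s] for s in states}
for obs in observations[1:]:
    new_probs, new_paths = {}, {}
    for s in states:
        prev = max(states, key=lambda k: probs[k] * trans_p[k][s])
        new_probs[s] = probs[prev] * trans_p[prev][s] * emit_p[s][obs]
        new_paths[s] = paths[prev] + [s]
    probs, paths = new_probs, new_paths

best = max(states, key=probs.get)
print(paths[best], probs[best])  # ['Sunny', 'Rainy', 'Rainy'] ~0.01344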
5
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
321
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A : List[Any] = logging.get_logger(__name__) A : Union[str, Any] = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __A( a ): snake_case_ = '''deformable_detr''' snake_case_ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , _snake_case=True , _snake_case=None , _snake_case=3 , _snake_case=300 , _snake_case=1_024 , _snake_case=6 , _snake_case=1_024 , _snake_case=8 , _snake_case=6 , _snake_case=1_024 , _snake_case=8 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=True , _snake_case=False , _snake_case="sine" , _snake_case="resnet50" , _snake_case=True , _snake_case=False , _snake_case=4 , _snake_case=4 , _snake_case=4 , _snake_case=False , _snake_case=300 , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , _snake_case=0.25 , _snake_case=False , **_snake_case , ) -> Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_snake_case , _snake_case ): __a = backbone_config.get('''model_type''' ) __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(_snake_case ) __a = use_timm_backbone __a = backbone_config __a = num_channels __a = num_queries __a = max_position_embeddings __a = d_model __a = encoder_ffn_dim __a = encoder_layers __a = encoder_attention_heads __a = decoder_ffn_dim __a = decoder_layers __a = decoder_attention_heads __a = dropout __a = attention_dropout __a = activation_dropout __a = activation_function __a = init_std __a = init_xavier_std __a = encoder_layerdrop __a = auxiliary_loss __a = position_embedding_type __a = backbone __a = use_pretrained_backbone __a = dilation # deformable attributes __a = num_feature_levels __a = encoder_n_points __a = decoder_n_points __a = two_stage __a = two_stage_num_proposals __a = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher __a = class_cost __a = bbox_cost __a = giou_cost # Loss coefficients __a = mask_loss_coefficient __a = dice_loss_coefficient __a = bbox_loss_coefficient __a = giou_loss_coefficient __a = eos_coefficient __a = focal_alpha __a = disable_custom_kernels super().__init__(is_encoder_decoder=_snake_case , **_snake_case ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' return self.d_model def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
6
'''simple docstring'''
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
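A numeric check of Malus' law with illustrative values: at 60 degrees the transmitted intensity drops to a quarter of the input, since cos(60°)² = 0.25.

print(malus_law(100.0, 60))  # ~25.0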
321
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BiogptConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
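With the parameter names restored (they were recovered from the assignments in the method body), the config instantiates like any other transformers config. A sketch, assuming the file lives inside the transformers package so the relative imports resolve:

config = BiogptConfig()
print(config.vocab_size, config.num_hidden_layers)  # 42384 24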
7
'''simple docstring''' import datasets from .evaluate import evaluate SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def A__ ( self ) -> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": { """id""": datasets.Value("""string""" ), """prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ), }, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , ) def A__ ( self 
, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} UpperCamelCase = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE ) return score
321
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
8
'''simple docstring'''


def least_divisible_repunit(divisor: int) -> int:
    # A(d): the least k such that the repunit R(k) = 111...1 (k ones) is
    # divisible by d. Only defined when gcd(d, 10) == 1, hence the early return.
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    # Find the least odd divisor (coprime to 10) whose A-value exceeds the limit.
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
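Worked example (values chosen for illustration): the shortest repunit divisible by 7 is R(6) = 111111 = 7 × 15873, while even divisors are rejected outright since repunits are odd.

assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(2) == 0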
321
0
from importlib import import_module from .logging import get_logger __lowerCAmelCase : str =get_logger(__name__) class _lowercase : '''simple docstring''' def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str=None ) -> int: __SCREAMING_SNAKE_CASE : List[str] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__''' ): setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module class _lowercase : '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = [] def __init__( self :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict=None ) -> List[Any]: __SCREAMING_SNAKE_CASE : Optional[int] = obj __SCREAMING_SNAKE_CASE : str = target __SCREAMING_SNAKE_CASE : Dict = new __SCREAMING_SNAKE_CASE : Union[str, Any] = target.split('''.''' )[0] __SCREAMING_SNAKE_CASE : List[str] = {} __SCREAMING_SNAKE_CASE : Tuple = attrs or [] def __enter__( self :int ) -> Dict: *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.target.split('''.''' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowerCAmelCase__ ) ): try: __SCREAMING_SNAKE_CASE : Any = import_module('''.'''.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.obj , lowerCAmelCase__ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __SCREAMING_SNAKE_CASE : int = obj_attr # patch at top level setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) ) __SCREAMING_SNAKE_CASE : List[str] = getattr(self.obj , lowerCAmelCase__ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) ) __SCREAMING_SNAKE_CASE : Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) # finally set the target attribute setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(import_module('''.'''.join(lowerCAmelCase__ ) ) , lowerCAmelCase__ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , lowerCAmelCase__ ) is attr_value: __SCREAMING_SNAKE_CASE : Any = getattr(self.obj , lowerCAmelCase__ ) setattr(self.obj , lowerCAmelCase__ , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __SCREAMING_SNAKE_CASE : Union[str, Any] = globals()['''__builtins__'''][target_attr] setattr(self.obj , lowerCAmelCase__ , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self :str , *lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]: for attr in list(self.original ): setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) ) def __magic_name__( self :List[Any] ) -> List[Any]: self.__enter__() self._active_patches.append(self ) def __magic_name__( self :Optional[int] ) -> int: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
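Both classes above end up named `_lowercase` in this dump (the second shadows the first), so calling the patcher directly is awkward. The following self-contained sketch shows the core idea it implements, temporarily swapping a module attribute and restoring it on exit; all names here are made up for illustration:

import os
from contextlib import contextmanager


@contextmanager
def patch_attr(obj, name, new):
    original = getattr(obj, name)
    setattr(obj, name, new)
    try:
        yield
    finally:
        setattr(obj, name, original)  # always restore the original attribute


with patch_attr(os.path, "join", lambda *parts: "/".join(parts)):
    print(os.path.join("a", "b"))  # "a/b" via the patched join
print(os.path.join("a", "b"))      # back to the real os.path.join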
9
'''simple docstring'''
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    # Given any two of (R, X, |Z|), solve |Z|^2 = R^2 + X^2 for the one passed as 0.
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
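A classic 3-4-5 triangle check for the impedance solver above (the function name follows the reconstruction):

print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}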
321
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } __A = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCamelCase__: Optional[int] ="lm_head" lowerCamelCase__: Dict =getattr(__a , __a ) if weight_type is not None: lowerCamelCase__: str =getattr(__a , __a ).shape else: lowerCamelCase__: int =hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCamelCase__: Dict =value elif weight_type == "weight_g": lowerCamelCase__: Optional[Any] =value elif weight_type == "weight_v": lowerCamelCase__: int =value elif weight_type == "bias": lowerCamelCase__: List[str] =value else: lowerCamelCase__: Union[str, Any] =value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: List[Any] =[] lowerCamelCase__: List[str] =fairseq_model.state_dict() lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase__: int =False if "conv_layers" in name: load_conv_layer( __a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , ) lowerCamelCase__: str =True else: for key, mapped_key in MAPPING.items(): lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase__: Optional[Any] =True if "*" in mapped_key: lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." 
)[-2] lowerCamelCase__: List[str] =mapped_key.replace("*" , __a ) if "weight_g" in name: lowerCamelCase__: List[str] ="weight_g" elif "weight_v" in name: lowerCamelCase__: Union[str, Any] ="weight_v" elif "bias" in name: lowerCamelCase__: Dict ="bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase__: Tuple ="weight" else: lowerCamelCase__: List[Any] =None set_recursively(__a , __a , __a , __a , __a , __a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]: """simple docstring""" lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1] lowerCamelCase__: List[str] =name.split("." ) lowerCamelCase__: str =int(items[0] ) lowerCamelCase__: Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCamelCase__: List[str] =value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCamelCase__: Dict =value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowerCamelCase__: List[Any] =value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCamelCase__: List[str] =value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int: """simple docstring""" if config_path is not None: lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a ) else: lowerCamelCase__: List[Any] =UniSpeechConfig() if is_finetuned: if dict_path: lowerCamelCase__: str =Dictionary.load_from_json(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCamelCase__: Any =target_dict.pad_index lowerCamelCase__: int =target_dict.bos_index lowerCamelCase__: Any =target_dict.eos_index lowerCamelCase__: Dict =len(target_dict.symbols ) lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" ) if not os.path.isdir(__a ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) ) return os.makedirs(__a , exist_ok=__a ) lowerCamelCase__: Optional[Any] =target_dict.indices # fairseq has the <pad> and <s> switched lowerCamelCase__: Optional[Any] =42 lowerCamelCase__: List[Any] =43 with open(__a , "w" , encoding="utf-8" ) as vocab_handle: json.dump(__a , __a ) lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer( __a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , ) lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False lowerCamelCase__: Tuple =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , ) lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a ) processor.save_pretrained(__a ) lowerCamelCase__: int =UniSpeechForCTC(__a ) else: lowerCamelCase__: int =UniSpeechForPreTraining(__a ) if is_finetuned: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowerCamelCase__: List[str] =model[0].eval() recursively_load_weights(__a , __a , __a ) hf_unispeech.save_pretrained(__a ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __A = parser.parse_args() convert_unispeech_checkpoint( 
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
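Similarly, a hypothetical programmatic call of the UniSpeech converter above, mirroring the argparse wiring at the bottom of the file; all paths are placeholders and the arguments are positional to match the obfuscated signature:

# Hypothetical call converting a pretraining (not fine-tuned) checkpoint.
convert_unispeech_checkpoint(
    "./unispeech_fairseq.pt",  # checkpoint_path (placeholder)
    "./unispeech-hf",          # pytorch_dump_folder_path (placeholder)
    None,                      # config_path
    None,                      # dict_path
    False,                     # is_finetuned, i.e. the effect of --not_finetuned
)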
10
'''simple docstring'''


# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
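Tracing the example from main(): with values in 2..8 the sort builds seven holes holding counts [1, 1, 1, 0, 1, 1, 2] (one hole per value 2 through 8) before reading them back out in order.

data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)
print(data)  # [2, 3, 4, 6, 7, 8, 8]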
321
0
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    # All knight moves from `position` that stay on an n x n board.
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
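Usage sketch: a 5 x 5 board admits an open knight's tour, and each cell of the returned board records its visit order (1 through 25).

for row in open_knight_tour(5):
    print(row)
# open_knight_tour(3) raises ValueError: the center square is unreachable,
# so no open tour exists on a 3 x 3 board.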
11
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class a_ ( lowerCamelCase ): lowercase = (DDPMParallelScheduler,) def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = { """num_train_timesteps""": 1000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**_SCREAMING_SNAKE_CASE ) return config def A__ ( self ) -> List[str]: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> str: """simple docstring""" self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , ) def A__ ( self ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def A__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = self.dummy_sample_deter + 0.1 UpperCamelCase = self.dummy_sample_deter - 0.1 UpperCamelCase = samplea.shape[0] UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE ) UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert 
abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(_SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ): if i == len(_SCREAMING_SNAKE_CASE ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE ) UpperCamelCase = prev_t.item() self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , 
timesteps=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _SCREAMING_SNAKE_CASE , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
321
0
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase__ ( A__ : Any , A__ : Union[str, Any]=False ): '''simple docstring''' __lowerCamelCase = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("""head""" ): __lowerCamelCase = """segformer.encoder.""" + key if key.startswith("""backbone""" ): __lowerCamelCase = key.replace("""backbone""" , """segformer.encoder""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 __lowerCamelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] __lowerCamelCase = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(A__ )-1}' ) if "norm" in key: __lowerCamelCase = key.replace("""norm""" , """layer_norm""" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 __lowerCamelCase = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )] __lowerCamelCase = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(A__ )-1}' ) if "layer_norm1" in key: __lowerCamelCase = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: __lowerCamelCase = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 __lowerCamelCase = key[key.find("""block""" ) + len("""block""" )] __lowerCamelCase = key.replace(f'block{idx}' , f'block.{int(A__ )-1}' ) if "attn.q" in key: __lowerCamelCase = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: __lowerCamelCase = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: __lowerCamelCase = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: __lowerCamelCase = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: __lowerCamelCase = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: __lowerCamelCase = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: __lowerCamelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) __lowerCamelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 __lowerCamelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )] __lowerCamelCase = key.replace(f'linear_c{idx}' , f'linear_c.{int(A__ )-1}' ) if key.startswith("""head""" ): __lowerCamelCase = key.replace("""head""" , """classifier""" ) __lowerCamelCase = value return new_state_dict def lowerCamelCase__ ( A__ : List[Any] , A__ : int ): '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) __lowerCamelCase = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' ) __lowerCamelCase = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' ) # next, add keys and values (in that order) to the state dict __lowerCamelCase = kv_weight[ : config.hidden_sizes[i], : ] __lowerCamelCase = kv_bias[: config.hidden_sizes[i]] __lowerCamelCase = kv_weight[ 
config.hidden_sizes[i] :, : ] __lowerCamelCase = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) return image @torch.no_grad() def lowerCamelCase__ ( A__ : int , A__ : Optional[Any] , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = SegformerConfig() __lowerCamelCase = False # set attributes based on model_name __lowerCamelCase = """huggingface/label-files""" if "segformer" in model_name: __lowerCamelCase = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2] if "ade" in model_name: __lowerCamelCase = 150 __lowerCamelCase = """ade20k-id2label.json""" __lowerCamelCase = (1, 150, 128, 128) elif "city" in model_name: __lowerCamelCase = 19 __lowerCamelCase = """cityscapes-id2label.json""" __lowerCamelCase = (1, 19, 128, 128) else: raise ValueError(f'Model {model_name} not supported' ) elif "mit" in model_name: __lowerCamelCase = True __lowerCamelCase = model_name[4:6] __lowerCamelCase = 1000 __lowerCamelCase = """imagenet-1k-id2label.json""" __lowerCamelCase = (1, 1000) else: raise ValueError(f'Model {model_name} not supported' ) # set config attributes __lowerCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) ) __lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": __lowerCamelCase = [64, 128, 320, 512] __lowerCamelCase = 256 elif size == "b2": __lowerCamelCase = [64, 128, 320, 512] __lowerCamelCase = 768 __lowerCamelCase = [3, 4, 6, 3] elif size == "b3": __lowerCamelCase = [64, 128, 320, 512] __lowerCamelCase = 768 __lowerCamelCase = [3, 4, 18, 3] elif size == "b4": __lowerCamelCase = [64, 128, 320, 512] __lowerCamelCase = 768 __lowerCamelCase = [3, 8, 27, 3] elif size == "b5": __lowerCamelCase = [64, 128, 320, 512] __lowerCamelCase = 768 __lowerCamelCase = [3, 6, 40, 3] else: raise ValueError(f'Size {size} not supported' ) # load image processor (only resize + normalize) __lowerCamelCase = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=A__ , align=A__ , do_random_crop=A__ ) # prepare image __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=A__ , return_tensors="""pt""" ).pixel_values logger.info(f'Converting model {model_name}...' 
) # load original state dict if encoder_only: __lowerCamelCase = torch.load(A__ , map_location=torch.device("""cpu""" ) ) else: __lowerCamelCase = torch.load(A__ , map_location=torch.device("""cpu""" ) )["""state_dict"""] # rename keys __lowerCamelCase = rename_keys(A__ , encoder_only=A__ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(A__ , A__ ) # create HuggingFace model and load state dict if encoder_only: __lowerCamelCase = False __lowerCamelCase = SegformerForImageClassification(A__ ) else: __lowerCamelCase = SegformerForSemanticSegmentation(A__ ) model.load_state_dict(A__ ) model.eval() # forward pass __lowerCamelCase = model(A__ ) __lowerCamelCase = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": __lowerCamelCase = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": __lowerCamelCase = torch.tensor( [ [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": __lowerCamelCase = torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": __lowerCamelCase = torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": __lowerCamelCase = torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": __lowerCamelCase = torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": __lowerCamelCase = torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, 
-12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": __lowerCamelCase = torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": __lowerCamelCase = torch.tensor( [ [ [-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1], [-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1], [-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1], ], [ [-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1], [-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1], [-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1], ], [ [7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2], [4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1], [3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": __lowerCamelCase = torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": __lowerCamelCase = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": __lowerCamelCase = torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": __lowerCamelCase = torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": __lowerCamelCase = torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": __lowerCamelCase = torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, 
-2.8_126, -2.9_316]], ] ) else: __lowerCamelCase = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , A__ , atol=1E-2 ) # finally, save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='segformer.b0.512x512.ade.160k', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) UpperCAmelCase_ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
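# Editor's sketch (added; not part of the conversion script above): a toy illustration
# of the key/value split performed by read_in_k_v. The original SegFormer checkpoint
# stores the key and value projections stacked in one `kv` matrix of shape
# (2 * hidden_size, hidden_size); the first half is the key projection, the second
# half the value projection.
import torch

hidden_size = 4
kv_weight = torch.randn(2 * hidden_size, hidden_size)
k_weight = kv_weight[:hidden_size, :]  # key projection weight
v_weight = kv_weight[hidden_size:, :]  # value projection weight
assert k_weight.shape == v_weight.shape == (hidden_size, hidden_size)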
12
'''simple docstring'''
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b], using lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
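# Editor's check (added; not from the original file): compare the lazy segment tree's
# range-max answers with a brute-force max over the same list. Bounds are 1-based and
# inclusive, matching the build/query convention above.
def _brute_force_check() -> None:
    data = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    n = len(data)
    tree = SegmentTree(n)
    tree.build(1, 1, n, data)
    for left, right in [(4, 6), (7, 11), (1, 15)]:
        assert tree.query(1, 1, n, left, right) == max(data[left - 1 : right])

_brute_force_check()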
321
0
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


lowerCAmelCase = logging.get_logger(__name__)


class __lowercase(CLIPImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
13
'''simple docstring'''


def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
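# Editor's check (added): for n = 12 the only Pythagorean triplet with a + b + c = 12
# is (3, 4, 5), giving the product 60; 31875000 is the published Project Euler #9
# answer for the default n = 1000 (a=200, b=375, c=425).
assert solution(12) == 60
assert solution() == 31875000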
321
0
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _lowerCamelCase : Tuple = """CompVis/stable-diffusion-v1-1""" _lowerCamelCase : Tuple = """CompVis/stable-diffusion-v1-2""" _lowerCamelCase : List[Any] = """CompVis/stable-diffusion-v1-3""" _lowerCamelCase : Optional[Any] = """CompVis/stable-diffusion-v1-4""" class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : AutoencoderKL , UpperCAmelCase__ : CLIPTextModel , UpperCAmelCase__ : CLIPTokenizer , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase__ : StableDiffusionSafetyChecker , UpperCAmelCase__ : CLIPImageProcessor , UpperCAmelCase__ : bool = True , ) ->List[Any]: '''simple docstring''' super()._init_() A__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__) A__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__) A__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__) A__ = StableDiffusionPipeline( vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , requires_safety_checker=UpperCAmelCase__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea) @property def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict[str, Any]: '''simple docstring''' return {k: getattr(self , UpperCAmelCase__) for k in self.config.keys() if not k.startswith('''_''')} def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Optional[Union[str, int]] = "auto") ->List[Any]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' self.enable_attention_slicing(UpperCAmelCase__) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : str , ) ->Tuple: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , 
output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : str , ) ->Optional[int]: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Union[str, Any] , ) ->Optional[Any]: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , 
eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Union[str, Any] , ) ->Tuple: '''simple docstring''' A__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(UpperCAmelCase__) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""") # Get first result from Stable Diffusion Checkpoint v1.1 A__ = self.textaimg_sda_a( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 A__ = self.textaimg_sda_a( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 A__ = self.textaimg_sda_a( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 A__ = self.textaimg_sda_a( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], 
resa[0]])
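# Editor's sketch (added; standalone): the final entry point of the comparison
# pipeline above rejects sizes that are not multiples of 8, because Stable Diffusion's
# VAE works in a latent space downsampled by a factor of 8. A toy reproduction of
# that guard:
def check_image_dims(height: int, width: int) -> None:
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

check_image_dims(512, 512)  # fine
# check_image_dims(500, 512)  # would raise ValueError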
14
'''simple docstring''' import argparse import struct import unittest class a_ : def __init__( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" UpperCamelCase = data # Initialize hash values UpperCamelCase = [ 0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A, 0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19, ] # Initialize round constants UpperCamelCase = [ 0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5, 0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5, 0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3, 0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74, 0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC, 0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA, 0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7, 0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67, 0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13, 0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85, 0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3, 0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70, 0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5, 0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3, 0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08, 0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2, ] UpperCamelCase = self.preprocessing(self.data ) self.final_hash() @staticmethod def A__ ( _SCREAMING_SNAKE_CASE ) -> bytes: """simple docstring""" UpperCamelCase = B"""\x80""" + (B"""\x00""" * (63 - (len(_SCREAMING_SNAKE_CASE ) + 8) % 64)) UpperCamelCase = struct.pack(""">Q""" , (len(_SCREAMING_SNAKE_CASE ) * 8) ) return data + padding + big_endian_integer def A__ ( self ) -> None: """simple docstring""" UpperCamelCase = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCamelCase = list(struct.unpack(""">16L""" , _SCREAMING_SNAKE_CASE ) ) # add 48 0-ed integers words += [0] * 48 UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array UpperCamelCase = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) UpperCamelCase = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) UpperCamelCase = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression UpperCamelCase = self.ror(_SCREAMING_SNAKE_CASE , 6 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 11 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 25 ) UpperCamelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g) UpperCamelCase = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 UpperCamelCase = self.ror(_SCREAMING_SNAKE_CASE , 2 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 13 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 22 ) UpperCamelCase = (a & b) ^ (a & c) ^ (b & c) UpperCamelCase = (sa + maj) % 0x1_00_00_00_00 UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) UpperCamelCase = [a, b, c, d, e, f, g, h] # Modify final values UpperCamelCase = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes )
        ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right rotate a given unsigned number by a certain amount of rotations."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)


class a_(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
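# Editor's check (added): once the remaining mangled placeholder names in the class
# body above are restored, the pure-Python digest must match the standard library's.
def _cross_check_with_hashlib(payload: bytes = b"Test String") -> None:
    import hashlib

    assert SHAaaa(payload).hash == hashlib.sha256(payload).hexdigest()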
321
0
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Any = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "mvp" snake_case_ = ["past_key_values"] snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : str ,A : Optional[Any]=5_02_67 ,A : int=10_24 ,A : List[Any]=12 ,A : Any=40_96 ,A : Dict=16 ,A : Any=12 ,A : Optional[int]=40_96 ,A : Optional[int]=16 ,A : List[Any]=0.0 ,A : List[Any]=0.0 ,A : Optional[Any]="gelu" ,A : int=10_24 ,A : int=0.1 ,A : Tuple=0.0 ,A : Optional[Any]=0.0 ,A : Optional[Any]=0.02 ,A : str=0.0 ,A : Any=False ,A : Optional[Any]=True ,A : str=1 ,A : Optional[Any]=0 ,A : Optional[Any]=2 ,A : List[Any]=True ,A : int=2 ,A : str=2 ,A : List[Any]=False ,A : str=1_00 ,A : Any=8_00 ,**A : str ,): __A = vocab_size __A = max_position_embeddings __A = d_model __A = encoder_ffn_dim __A = encoder_layers __A = encoder_attention_heads __A = decoder_ffn_dim __A = decoder_layers __A = decoder_attention_heads __A = dropout __A = attention_dropout __A = activation_dropout __A = activation_function __A = init_std __A = encoder_layerdrop __A = decoder_layerdrop __A = classifier_dropout __A = use_cache __A = encoder_layers __A = scale_embedding # scale factor will be sqrt(d_model) if True __A = use_prompt __A = prompt_length __A = prompt_mid_dim super().__init__( pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,is_encoder_decoder=A ,decoder_start_token_id=A ,forced_eos_token_id=A ,**A ,) if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" ,A ): __A = self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' "The config can simply be saved and uploaded again to be fixed." )
15
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) SCREAMING_SNAKE_CASE__ = _symbol_database.Default() SCREAMING_SNAKE_CASE__ = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) SCREAMING_SNAKE_CASE__ = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" SCREAMING_SNAKE_CASE__ = 4_5 SCREAMING_SNAKE_CASE__ = 1_5_8_1 SCREAMING_SNAKE_CASE__ = 1_5_1_7 SCREAMING_SNAKE_CASE__ = 1_5_7_0 SCREAMING_SNAKE_CASE__ = 1_5_8_4 SCREAMING_SNAKE_CASE__ = 1_7_9_3 SCREAMING_SNAKE_CASE__ = 1_7_9_5 SCREAMING_SNAKE_CASE__ = 1_9_1_6 SCREAMING_SNAKE_CASE__ = 1_8_6_4 SCREAMING_SNAKE_CASE__ = 1_9_0_5 SCREAMING_SNAKE_CASE__ = 1_9_1_9 SCREAMING_SNAKE_CASE__ = 2_4_2_9 SCREAMING_SNAKE_CASE__ = 2_2_0_8 SCREAMING_SNAKE_CASE__ = 2_4_1_8 SCREAMING_SNAKE_CASE__ = 2_3_2_3 SCREAMING_SNAKE_CASE__ = 2_4_0_7 # @@protoc_insertion_point(module_scope)
321
0
"""simple docstring""" import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowerCAmelCase_ = '\\n Text data.\n Second line of data.' lowerCAmelCase_ = 'file' @pytest.fixture(scope='''session''' ) def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]: lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') lowercase__ : Any = bytes(__lowerCamelCase , '''utf-8''' ) with zstd.open(__lowerCamelCase , '''wb''' ) as f: f.write(__lowerCamelCase ) return path @pytest.fixture def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple: with open(os.path.join(tmpfs.local_root_dir , __lowerCamelCase ) , '''w''' ) as f: f.write(__lowerCamelCase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: lowercase__ : str = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} lowercase__ : str = input_paths[compression_format] lowercase__ : Tuple = tmp_path / '''cache''' lowercase__ : Optional[int] = DownloadConfig(cache_dir=__lowerCamelCase , extract_compressed_file=__lowerCamelCase ) lowercase__ : Union[str, Any] = cached_path(__lowerCamelCase , download_config=__lowerCamelCase ) with open(__lowerCamelCase ) as f: lowercase__ : Dict = f.read() with open(__lowerCamelCase ) as f: lowercase__ : int = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: lowercase__ : Any = '''custom_cache''' lowercase__ : Tuple = '''custom_extracted_dir''' lowercase__ : Tuple = tmp_path / '''custom_extracted_path''' if default_extracted: lowercase__ : List[Any] = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , __lowerCamelCase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__lowerCamelCase ) ) lowercase__ : str = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) lowercase__ : int = xz_file lowercase__ : List[Any] = ( DownloadConfig(extract_compressed_file=__lowerCamelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__lowerCamelCase ) ) lowercase__ : Union[str, Any] = cached_path(__lowerCamelCase , download_config=__lowerCamelCase ) assert Path(__lowerCamelCase ).parent.parts[-2:] == expected def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]: # absolute path lowercase__ : List[Any] = str(Path(__lowerCamelCase ).resolve() ) assert cached_path(__lowerCamelCase ) == text_file # relative path lowercase__ : Union[str, Any] = str(Path(__lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(__lowerCamelCase ) == text_file def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]: # absolute path lowercase__ : Dict = str(tmp_path.resolve() / 
'''__missing_file__.txt''' ) with pytest.raises(__lowerCamelCase ): cached_path(__lowerCamelCase ) # relative path lowercase__ : Dict = '''./__missing_file__.txt''' with pytest.raises(__lowerCamelCase ): cached_path(__lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase ) -> int: lowercase__ : List[str] = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(__lowerCamelCase ) as f: lowercase__ : int = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , __lowerCamelCase ) def __UpperCAmelCase ( ) -> Tuple: with pytest.raises(__lowerCamelCase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , __lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]: lowercase__ : str = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(__lowerCamelCase ): http_get('''https://huggingface.co''' , temp_file=__lowerCamelCase ) with pytest.raises(__lowerCamelCase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , __lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]: lowercase__ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(__lowerCamelCase ): ftp_get('''ftp://huggingface.co''' , temp_file=__lowerCamelCase ) with pytest.raises(__lowerCamelCase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , __lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]: lowercase__ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(__lowerCamelCase ): fsspec_get('''s3://huggingface.co''' , temp_file=__lowerCamelCase ) with pytest.raises(__lowerCamelCase ): fsspec_head('''s3://huggingface.co''' )
16
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
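# Editor's example (added): one mole of an ideal gas at 300 K in 0.0224 m^3 exerts
# p = nRT / V ≈ 1.11e5 Pa, i.e. roughly atmospheric pressure.
import math

assert math.isclose(pressure_of_gas_system(1, 300, 0.0224), 111354.4, rel_tol=1e-4)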
321
0
"""simple docstring""" import cva import numpy as np class _lowerCAmelCase : """simple docstring""" def __init__( self : Tuple, UpperCAmelCase__ : float, UpperCAmelCase__ : int ): if k in (0.04, 0.06): __lowercase = k __lowercase = window_size else: raise ValueError("invalid k value" ) def __str__( self : Any ): return str(self.k ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : str ): __lowercase = cva.imread(UpperCAmelCase__, 0 ) __lowercase ,__lowercase = img.shape __lowercase = [] __lowercase = img.copy() __lowercase = cva.cvtColor(UpperCAmelCase__, cva.COLOR_GRAY2RGB ) __lowercase ,__lowercase = np.gradient(UpperCAmelCase__ ) __lowercase = dx**2 __lowercase = dy**2 __lowercase = dx * dy __lowercase = 0.04 __lowercase = self.window_size // 2 for y in range(UpperCAmelCase__, h - offset ): for x in range(UpperCAmelCase__, w - offset ): __lowercase = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowercase = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowercase = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowercase = (wxx * wyy) - (wxy**2) __lowercase = wxx + wyy __lowercase = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0), 0 ) color_img.itemset((y, x, 1), 0 ) color_img.itemset((y, x, 2), 2_5_5 ) return color_img, corner_list if __name__ == "__main__": _a = HarrisCorner(0.04, 3) _a , _a = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
17
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_safs = importlib.util.find_spec('s3fs') is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str) -> int:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
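# Editor's check (added): extract_path_from_uri strips the scheme from remote URIs and
# leaves local paths unchanged.
assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"
assert extract_path_from_uri("/local/path/dataset") == "/local/path/dataset"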
321
0
# limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class a__ ( A__ ): def __init__( self : Dict,_A : List[Any],_A : Any ): """simple docstring""" super().__init__() self.register_modules(unet=_A,scheduler=_A ) @torch.no_grad() def __call__( self : Dict,_A : int = 1,_A : Optional[torch.Generator] = None,_A : int = 50,_A : Optional[str] = "pil",_A : bool = True,**_A : str,): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),generator=_A,) SCREAMING_SNAKE_CASE_ : int = image.to(self.device ) # set step values self.scheduler.set_timesteps(_A ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output SCREAMING_SNAKE_CASE_ : Tuple = self.unet(_A,_A ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler.step(_A,_A,_A ).prev_sample SCREAMING_SNAKE_CASE_ : str = (image / 2 + 0.5).clamp(0,1 ) SCREAMING_SNAKE_CASE_ : Tuple = image.cpu().permute(0,2,3,1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_ : Dict = self.numpy_to_pil(_A ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=_A ), "This is a local test"
18
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = { 'configuration_xlm_roberta_xl': [ 'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaXLConfig', 'XLMRobertaXLOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ 'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaXLForCausalLM', 'XLMRobertaXLForMaskedLM', 'XLMRobertaXLForMultipleChoice', 'XLMRobertaXLForQuestionAnswering', 'XLMRobertaXLForSequenceClassification', 'XLMRobertaXLForTokenClassification', 'XLMRobertaXLModel', 'XLMRobertaXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
321
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A ={ '''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''], '''tokenization_tapas''': ['''TapasTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ '''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TapasForMaskedLM''', '''TapasForQuestionAnswering''', '''TapasForSequenceClassification''', '''TapasModel''', '''TapasPreTrainedModel''', '''load_tf_weights_in_tapas''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ '''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFTapasForMaskedLM''', '''TFTapasForQuestionAnswering''', '''TFTapasForSequenceClassification''', '''TFTapasModel''', '''TFTapasPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys __A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
19
'''simple docstring''' import argparse from collections import defaultdict import yaml SCREAMING_SNAKE_CASE__ = 'docs/source/en/_toctree.yml' def lowercase__ ( __UpperCamelCase )-> Optional[Any]: UpperCamelCase = defaultdict(__UpperCamelCase ) UpperCamelCase = [] UpperCamelCase = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__UpperCamelCase ) UpperCamelCase = new_doc_list UpperCamelCase = [key for key, value in counts.items() if value > 1] UpperCamelCase = [] for duplicate_key in duplicates: UpperCamelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__UpperCamelCase ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) UpperCamelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__UpperCamelCase ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__UpperCamelCase ) # Sort return overview_doc def lowercase__ ( __UpperCamelCase=False )-> List[str]: with open(__UpperCamelCase , encoding="""utf-8""" ) as f: UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCamelCase = content[api_idx]["""sections"""] # Then to the model doc UpperCamelCase = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 UpperCamelCase = api_doc[scheduler_idx]["""sections"""] UpperCamelCase = clean_doc_toc(__UpperCamelCase ) UpperCamelCase = False if new_scheduler_doc != scheduler_doc: UpperCamelCase = True if overwrite: UpperCamelCase = new_scheduler_doc if diff: if overwrite: UpperCamelCase = api_doc with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def lowercase__ ( __UpperCamelCase=False )-> Tuple: with open(__UpperCamelCase , encoding="""utf-8""" ) as f: UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCamelCase = content[api_idx]["""sections"""] # Then to the model doc UpperCamelCase = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 UpperCamelCase = False UpperCamelCase = api_doc[pipeline_idx]["""sections"""] UpperCamelCase = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: UpperCamelCase = pipeline_doc["""section"""] UpperCamelCase = clean_doc_toc(__UpperCamelCase ) if overwrite: UpperCamelCase = new_sub_pipeline_doc new_pipeline_docs.append(__UpperCamelCase ) # sort overall pipeline doc UpperCamelCase = clean_doc_toc(__UpperCamelCase ) if new_pipeline_docs != pipeline_docs: UpperCamelCase = True if overwrite: UpperCamelCase = new_pipeline_docs if diff: if overwrite: 
UpperCamelCase = api_doc with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') SCREAMING_SNAKE_CASE__ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
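The record above is a TOC-maintenance script for the docs `_toctree.yml`: it dedups entries by their "local" key, errors on conflicting titles, sorts alphabetically, and floats any "Overview" entry to the front. Because the flattened dump is hard to read, here is a compact, self-contained sketch of that core logic; the names and structure below are mine, not the script's.

def clean_doc_toc(doc_list):
    """doc_list: [{"local": ..., "title": ...}, ...] entries from _toctree.yml."""
    overview, deduped = [], {}
    for doc in doc_list:
        if doc.get("title", "").lower() == "overview":
            overview.append(doc)
        elif doc["local"] in deduped and deduped[doc["local"]]["title"] != doc["title"]:
            raise ValueError(f"{doc['local']} appears twice with different titles")
        else:
            deduped.setdefault(doc["local"], doc)  # exact duplicates collapse to one
    if len(overview) > 1:
        raise ValueError("more than one 'Overview' entry is not allowed")
    return overview + sorted(deduped.values(), key=lambda d: d["title"].lower())

toc = [
    {"local": "ddim", "title": "DDIM"},
    {"local": "overview", "title": "Overview"},
    {"local": "ddpm", "title": "DDPM"},
    {"local": "ddpm", "title": "DDPM"},  # exact duplicate: collapsed
]
print([d["title"] for d in clean_doc_toc(toc)])  # ['Overview', 'DDIM', 'DDPM']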
321
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowercase : Optional[int] = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Union[str, Any] = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : str = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
20
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]: UpperCamelCase = 1.5 UpperCamelCase = int(factor * num_class_images ) UpperCamelCase = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 ) os.makedirs(F"{class_data_dir}/images" , exist_ok=__UpperCamelCase ) if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images: return while True: UpperCamelCase = client.query(text=__UpperCamelCase ) if len(__UpperCamelCase ) >= factor * num_class_images or num_images > 1E4: break else: UpperCamelCase = int(factor * num_images ) UpperCamelCase = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 , ) UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = tqdm(desc="""downloading real regularization images""" , total=__UpperCamelCase ) with open(F"{class_data_dir}/caption.txt" , """w""" ) as fa, open(F"{class_data_dir}/urls.txt" , """w""" ) as fa, open( F"{class_data_dir}/images.txt" , """w""" ) as fa: while total < num_class_images: UpperCamelCase = class_images[count] count += 1 try: UpperCamelCase = requests.get(images["""url"""] ) if img.status_code == 200: UpperCamelCase = Image.open(BytesIO(img.content ) ) with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowercase__ ( )-> str: UpperCamelCase = argparse.ArgumentParser("""""" , add_help=__UpperCamelCase ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__UpperCamelCase , type=__UpperCamelCase ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__UpperCamelCase , type=__UpperCamelCase ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=__UpperCamelCase ) return parser.parse_args() if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
321
0
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class _lowerCamelCase( unittest.TestCase ): @parameterized.expand([(None,), ('foo.json',)]) def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]: """simple docstring""" _lowercase : Tuple = GenerationConfig( do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase, config_name=lowerCamelCase) _lowercase : List[str] = GenerationConfig.from_pretrained(lowerCamelCase, config_name=lowerCamelCase) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample, lowerCamelCase) self.assertEqual(loaded_config.temperature, 0.7) self.assertEqual(loaded_config.length_penalty, 1.0) self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k, 50) self.assertEqual(loaded_config.max_length, 20) self.assertEqual(loaded_config.max_time, lowerCamelCase) def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : int = AutoConfig.from_pretrained('gpt2') _lowercase : List[str] = GenerationConfig.from_model_config(lowerCamelCase) _lowercase : Union[str, Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowerCamelCase, lowerCamelCase) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id) def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Union[str, Any] = GenerationConfig() _lowercase : List[Any] = { 'max_new_tokens': 10_24, 'foo': 'bar', } _lowercase : Any = copy.deepcopy(lowerCamelCase) _lowercase : List[Any] = generation_config.update(**lowerCamelCase) # update_kwargs was not modified (no side effects) self.assertEqual(lowerCamelCase, lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens, 10_24) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowerCamelCase, {'foo': 'bar'}) def UpperCamelCase ( self) -> Dict: """simple docstring""" _lowercase : List[Any] = GenerationConfig() _lowercase : List[Any] = 'bar' with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir: generation_config.save_pretrained(lowerCamelCase) _lowercase : List[Any] = GenerationConfig.from_pretrained(lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo, 'bar') _lowercase : int = GenerationConfig.from_model_config(lowerCamelCase) assert not hasattr(lowerCamelCase, 'foo') # no new kwargs should be initialized if from config def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature, 1.0) self.assertEqual(default_config.do_sample, lowerCamelCase) self.assertEqual(default_config.num_beams, 1) _lowercase : Optional[Any] = GenerationConfig( do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 
3], [4, 5]], ) self.assertEqual(config.temperature, 0.7) self.assertEqual(config.do_sample, lowerCamelCase) self.assertEqual(config.num_beams, 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase) _lowercase : List[Any] = GenerationConfig.from_pretrained(lowerCamelCase, temperature=1.0) self.assertEqual(loaded_config.temperature, 1.0) self.assertEqual(loaded_config.do_sample, lowerCamelCase) self.assertEqual(loaded_config.num_beams, 1) # default value @is_staging_test class _lowerCamelCase( unittest.TestCase ): @classmethod def UpperCamelCase ( cls) -> str: """simple docstring""" _lowercase : Union[str, Any] = TOKEN HfFolder.save_token(lowerCamelCase) @classmethod def UpperCamelCase ( cls) -> Optional[int]: """simple docstring""" try: delete_repo(token=cls._token, repo_id='test-generation-config') except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-generation-config-org') except HTTPError: pass def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : List[Any] = GenerationConfig( do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, ) config.push_to_hub('test-generation-config', use_auth_token=self._token) _lowercase : Any = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase)) # Reset repo delete_repo(token=self._token, repo_id='test-generation-config') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCamelCase, repo_id='test-generation-config', push_to_hub=lowerCamelCase, use_auth_token=self._token) _lowercase : Tuple = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase)) def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase : Dict = GenerationConfig( do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, ) config.push_to_hub('valid_org/test-generation-config-org', use_auth_token=self._token) _lowercase : Any = GenerationConfig.from_pretrained('valid_org/test-generation-config-org') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase)) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-generation-config-org') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCamelCase, repo_id='valid_org/test-generation-config-org', push_to_hub=lowerCamelCase, use_auth_token=self._token) _lowercase : Optional[Any] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase))
21
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') @dataclass class a_ : lowercase = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} ) lowercase = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = {} if self.train_dir is not None: UpperCamelCase = self.train_dir if self.validation_dir is not None: UpperCamelCase = self.validation_dir UpperCamelCase = data_files if data_files else None @dataclass class a_ : lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) lowercase = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowercase = field(default=lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) lowercase = field( default=lowerCamelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) lowercase = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) lowercase = field( default=lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class a_ ( lowerCamelCase ): lowercase = field( default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def lowercase__ ( __UpperCamelCase )-> int: UpperCamelCase = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def lowercase__ ( )-> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase = training_args.get_process_log_level() logger.setLevel(__UpperCamelCase ) transformers.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. 
UpperCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __UpperCamelCase ) and data_args.train_val_split > 0.0: UpperCamelCase = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase = split["""train"""] UpperCamelCase = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__UpperCamelCase ) elif model_args.model_name_or_path: UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: UpperCamelCase = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(F"New config: {config}" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__UpperCamelCase ) elif model_args.model_name_or_path: UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: UpperCamelCase = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase = ViTMAEForPreTraining(__UpperCamelCase ) if training_args.do_train: UpperCamelCase = ds["""train"""].column_names else: UpperCamelCase = ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase = data_args.image_column_name elif "image" in column_names: UpperCamelCase = """image""" elif "img" in column_names: 
UpperCamelCase = """img""" else: UpperCamelCase = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase = image_processor.size["""shortest_edge"""] else: UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase = Compose( [ Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__UpperCamelCase ): UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__UpperCamelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__UpperCamelCase ) # Compute absolute learning rate UpperCamelCase = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: UpperCamelCase = None if training_args.resume_from_checkpoint is not None: UpperCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase = last_checkpoint UpperCamelCase = trainer.train(resume_from_checkpoint=__UpperCamelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase = trainer.evaluate() trainer.log_metrics("""eval""" , __UpperCamelCase ) trainer.save_metrics("""eval""" , __UpperCamelCase ) # Write model card and (optionally) push to hub UpperCamelCase = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**__UpperCamelCase ) else: trainer.create_model_card(**__UpperCamelCase ) def lowercase__ ( __UpperCamelCase )-> List[str]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
321
0
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer: wraps keras-nlp's BPE tokenizer in a Keras layer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        # Each BPE merge rule is a pair of tokens joined by a single space.
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
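A quick usage sketch for the layer above. It assumes tensorflow, keras-nlp and tensorflow-text are installed, that the layer is importable from its module, and that the "gpt2" vocabulary can be fetched from the Hub; the shapes in the comments are what I would expect, not verified output.

import tensorflow as tf
from transformers import GPT2Tokenizer

hf_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")  # needs network access
layer = TFGPT2Tokenizer.from_tokenizer(hf_tokenizer, max_length=8, pad_token_id=hf_tokenizer.eos_token_id)

batch = layer(tf.constant(["hello world", "a somewhat longer second sentence"]))
print(batch["input_ids"].shape)    # expected (2, 8): padded/truncated to max_length
print(batch["attention_mask"][0])  # 1s over real tokens, 0s over padding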
22
'''simple docstring''' import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE__ = 2_5_6 class a_ ( lowerCamelCase ): lowercase = ["""melgan"""] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__() # From MELGAN UpperCamelCase = math.log(1e-5 ) # Matches MelGAN training. UpperCamelCase = 4.0 # Largest value for most examples UpperCamelCase = 128 self.register_modules( notes_encoder=_SCREAMING_SNAKE_CASE , continuous_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , melgan=_SCREAMING_SNAKE_CASE , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" UpperCamelCase ,UpperCamelCase = output_range if clip: UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , self.min_value , self.max_value ) # Scale to [0, 1]. UpperCamelCase = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = input_range UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if clip else outputs # Scale to [0, 1]. UpperCamelCase = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" UpperCamelCase = input_tokens > 0 UpperCamelCase ,UpperCamelCase = self.notes_encoder( encoder_input_tokens=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase = self.continuous_encoder( encoder_inputs=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" UpperCamelCase = noise_time if not torch.is_tensor(_SCREAMING_SNAKE_CASE ): UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0: UpperCamelCase = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCamelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) UpperCamelCase = self.decoder( encodings_and_masks=_SCREAMING_SNAKE_CASE , decoder_input_tokens=_SCREAMING_SNAKE_CASE , decoder_noise_time=_SCREAMING_SNAKE_CASE ) return logits @torch.no_grad() def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "numpy" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , ) -> Union[AudioPipelineOutput, Tuple]: """simple docstring""" if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(_SCREAMING_SNAKE_CASE )}." ) UpperCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) UpperCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa ) UpperCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) for i, encoder_input_tokens in enumerate(_SCREAMING_SNAKE_CASE ): if i == 0: UpperCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. UpperCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
UpperCamelCase = ones UpperCamelCase = self.scale_features( _SCREAMING_SNAKE_CASE , output_range=[-1.0, 1.0] , clip=_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_SCREAMING_SNAKE_CASE , continuous_mask=_SCREAMING_SNAKE_CASE , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop UpperCamelCase = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCamelCase = self.decode( encodings_and_masks=_SCREAMING_SNAKE_CASE , input_tokens=_SCREAMING_SNAKE_CASE , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 UpperCamelCase = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample UpperCamelCase = self.scale_to_features(_SCREAMING_SNAKE_CASE , input_range=[-1.0, 1.0] ) UpperCamelCase = mel[:1] UpperCamelCase = mel.cpu().float().numpy() UpperCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info("""Generated segment""" , _SCREAMING_SNAKE_CASE ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" ) elif output_type == "numpy" and self.melgan is None: raise ValueError( """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" ) if output_type == "numpy": UpperCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: UpperCamelCase = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_SCREAMING_SNAKE_CASE )
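The pipeline's scale_features / scale_to_features pair above is an affine min-max map between the mel-feature range [log(1e-5), 4.0] and a target range, plus its inverse. A standalone numpy sketch (function and constant names are mine) with a round-trip check:

import numpy as np

MIN_VALUE = np.log(1e-5)  # matches the pipeline's MelGAN-derived floor
MAX_VALUE = 4.0           # largest value for most examples

def scale_features(x, out_min=-1.0, out_max=1.0, clip=False):
    # [MIN_VALUE, MAX_VALUE] -> [out_min, out_max]
    if clip:
        x = np.clip(x, MIN_VALUE, MAX_VALUE)
    zero_one = (x - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)
    return zero_one * (out_max - out_min) + out_min

def scale_to_features(y, in_min=-1.0, in_max=1.0, clip=False):
    # inverse map: [in_min, in_max] -> [MIN_VALUE, MAX_VALUE]
    if clip:
        y = np.clip(y, in_min, in_max)
    zero_one = (y - in_min) / (in_max - in_min)
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE

x = np.linspace(MIN_VALUE, MAX_VALUE, 5)
assert np.allclose(scale_to_features(scale_features(x)), x)  # round trip holds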
321
0
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class SCREAMING_SNAKE_CASE( A__ ): """simple docstring""" lowerCamelCase__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
23
def solution(limit: int = 4_000_000) -> int:
    """Project Euler problem 2: sum the even-valued Fibonacci terms not exceeding `limit`."""
    even_fibs = []
    a, b = 0, 1
    while b <= limit:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
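Since every third Fibonacci number is even, the even terms satisfy E(k) = 4*E(k-1) + E(k-2) (2, 8, 34, 144, ...), so the sum can be computed without visiting the odd terms at all. A sketch of that variant, cross-checked against solution() above:

def solution_by_identity(limit: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8  # first two even Fibonacci numbers
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

assert solution_by_identity(100) == 2 + 8 + 34  # = 44
assert solution_by_identity() == solution()     # agrees with the loop above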
321
0
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin snake_case_ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case_ = 256047 snake_case_ = 256145 @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Any = NllbTokenizer A_ : Tuple = NllbTokenizerFast A_ : Union[str, Any] = True A_ : List[str] = True A_ : int = {} def a (self : Any ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __snake_case = NllbTokenizer(a__ , keep_accents=a__ ) tokenizer.save_pretrained(self.tmpdirname ) def a (self : List[Any] ): """simple docstring""" __snake_case = NllbTokenizer(a__ , keep_accents=a__ ) __snake_case = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __snake_case = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual( a__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __snake_case = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case = self.rust_tokenizer_class.from_pretrained(a__ , **a__ ) __snake_case = self.tokenizer_class.from_pretrained(a__ , **a__ ) __snake_case = tempfile.mkdtemp() __snake_case = tokenizer_r.save_pretrained(a__ ) __snake_case = tokenizer_p.save_pretrained(a__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __snake_case = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(a__ , a__ ) # Checks everything loads correctly in the same way __snake_case = tokenizer_r.from_pretrained(a__ ) __snake_case = tokenizer_p.from_pretrained(a__ ) 
# Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a__ , a__ ) ) shutil.rmtree(a__ ) # Save tokenizer rust, legacy_format=True __snake_case = tempfile.mkdtemp() __snake_case = tokenizer_r.save_pretrained(a__ , legacy_format=a__ ) __snake_case = tokenizer_p.save_pretrained(a__ ) # Checks it save with the same files self.assertSequenceEqual(a__ , a__ ) # Checks everything loads correctly in the same way __snake_case = tokenizer_r.from_pretrained(a__ ) __snake_case = tokenizer_p.from_pretrained(a__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a__ , a__ ) ) shutil.rmtree(a__ ) # Save tokenizer rust, legacy_format=False __snake_case = tempfile.mkdtemp() __snake_case = tokenizer_r.save_pretrained(a__ , legacy_format=a__ ) __snake_case = tokenizer_p.save_pretrained(a__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __snake_case = tokenizer_r.from_pretrained(a__ ) __snake_case = tokenizer_p.from_pretrained(a__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a__ , a__ ) ) shutil.rmtree(a__ ) @require_torch def a (self : Optional[Any] ): """simple docstring""" if not self.test_seqaseq: return __snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation. __snake_case = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for''' ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons''' ''' will only worsen the violence and misery for millions of people.''', ] __snake_case = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al''' ''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi''' ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] try: __snake_case = tokenizer.prepare_seqaseq_batch( src_texts=a__ , tgt_texts=a__ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified __snake_case = tokenizer.prepare_seqaseq_batch( a__ , tgt_texts=a__ , max_length=3 , return_tensors='''pt''' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) __snake_case = tokenizer.prepare_seqaseq_batch( src_texts=a__ , max_length=3 , max_target_length=10 , return_tensors='''pt''' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('''decoder_input_ids''' , a__ ) @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' ) def a (self : List[str] ): """simple docstring""" pass def a (self : int ): """simple docstring""" for tokenizer, pretrained_name, kwargs in 
self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case = [AddedToken('''<special>''' , lstrip=a__ )] __snake_case = self.rust_tokenizer_class.from_pretrained( a__ , additional_special_tokens=a__ , **a__ ) __snake_case = tokenizer_r.encode('''Hey this is a <special> token''' ) __snake_case = tokenizer_r.encode('''<special>''' , add_special_tokens=a__ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: __snake_case = self.rust_tokenizer_class.from_pretrained( a__ , additional_special_tokens=a__ , **a__ , ) __snake_case = self.tokenizer_class.from_pretrained( a__ , additional_special_tokens=a__ , **a__ ) __snake_case = tokenizer_p.encode('''Hey this is a <special> token''' ) __snake_case = tokenizer_cr.encode('''Hey this is a <special> token''' ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): A_ : List[Any] = 'facebook/nllb-200-distilled-600M' A_ : List[str] = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] A_ : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] A_ : List[Any] = [ 256_047, 16_297, 134_408, 8_165, 248_066, 14_734, 950, 1_135, 105_721, 3_573, 83, 27_352, 108, 49_486, 2, ] @classmethod def a (cls : Tuple ): """simple docstring""" __snake_case = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' ) __snake_case = 1 return cls def a (self : Optional[int] ): """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 25_6001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 25_6002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 25_6057 ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , a__ ) def a (self : Optional[Any] ): """simple docstring""" self.assertIn(a__ , self.tokenizer.all_special_ids ) # fmt: off __snake_case = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047] # fmt: on __snake_case = self.tokenizer.decode(a__ , skip_special_tokens=a__ ) __snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a__ ) self.assertEqual(a__ , a__ ) self.assertNotIn(self.tokenizer.eos_token , a__ ) def a (self : Optional[int] ): """simple docstring""" __snake_case = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , a__ ) __snake_case = 10 __snake_case = self.tokenizer(a__ , max_length=a__ , truncation=a__ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , a__ ) self.assertEqual(len(a__ ) , a__ ) def a 
(self : Tuple ): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_6203, 3] ) def a (self : Any ): """simple docstring""" __snake_case = tempfile.mkdtemp() __snake_case = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(a__ ) __snake_case = NllbTokenizer.from_pretrained(a__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a__ ) @require_torch def a (self : str ): """simple docstring""" __snake_case = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=a__ , truncation=a__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __snake_case = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] ) self.assertIsInstance(a__ , a__ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) __snake_case = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , a__ ) self.assertEqual(a__ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.tokenizer(self.src_text , padding=a__ , truncation=a__ , max_length=3 , return_tensors='''pt''' ) __snake_case = self.tokenizer( text_target=self.tgt_text , padding=a__ , truncation=a__ , max_length=10 , return_tensors='''pt''' ) __snake_case = targets['''input_ids'''] __snake_case = shift_tokens_right( a__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a (self : str ): """simple docstring""" __snake_case = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( nested_simplify(a__ ) , { # A, test, EOS, en_XX '''input_ids''': [[25_6047, 70, 7356, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_6057, } , ) @require_torch def a (self : Optional[Any] ): """simple docstring""" __snake_case = True __snake_case = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] ) __snake_case = False __snake_case = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
24
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A vertex may take `color` only if no already-colored neighbour uses it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case: every vertex has been assigned a color.
    if index == len(graph):
        return True

    # Recursive Step: try each color on the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i

            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True

            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
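A quick demo of the backtracking colorer above on a small 5-vertex adjacency matrix; the expected outputs are worked out by hand:

graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(graph, 3))  # [0, 1, 0, 1, 0]: adjacent vertices get distinct colors
print(color(graph, 1))  # []: one color cannot work on any graph with an edge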
321
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase__ : Dict = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ : str = num_hidden_layers SCREAMING_SNAKE_CASE__ : int = num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : int = initializer_range SCREAMING_SNAKE_CASE__ : int = layer_norm_eps SCREAMING_SNAKE_CASE__ : Dict = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride SCREAMING_SNAKE_CASE__ : Any = time_stride SCREAMING_SNAKE_CASE__ : Optional[int] = max_length SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
25
def solution(n: int = 2_000_000) -> int:
    """Project Euler problem 10: sum of all primes below `n`, via a sieve of Eratosthenes."""
    # 0 marks "still assumed prime"; 1 marks "composite" (0 and 1 are pre-marked).
    primality_list = [0 for _ in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
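A cheap sanity check for the sieve above: compare against a naive trial-division sum for small limits (the naive helper is mine):

def brute_force(n: int) -> int:
    return sum(p for p in range(2, n) if all(p % d for d in range(2, int(p**0.5) + 1)))

assert solution(10) == brute_force(10) == 17  # 2 + 3 + 5 + 7
assert solution(1_000) == brute_force(1_000)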
321
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowercase ( unittest.TestCase ): def a__ ( self ) -> Any: _A : str = tempfile.mkdtemp() _A : str = BlipImageProcessor() _A : List[Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) _A : Union[str, Any] = BlipaProcessor(_a , _a ) processor.save_pretrained(self.tmpdirname ) def a__ ( self , **_a ) -> List[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer def a__ ( self , **_a ) -> Optional[int]: return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor def a__ ( self ) -> Any: shutil.rmtree(self.tmpdirname ) def a__ ( self ) -> Any: _A : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _A : List[str] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self ) -> List[Any]: _A : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _A : Optional[int] = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) _A : List[Any] = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def a__ ( self ) -> str: _A : Any = self.get_image_processor() _A : Optional[Any] = self.get_tokenizer() _A : List[Any] = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : List[str] = self.prepare_image_inputs() _A : Optional[Any] = image_processor(_a , return_tensors="""np""" ) _A : Optional[int] = processor(images=_a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a__ ( self ) -> List[Any]: _A : Tuple = self.get_image_processor() _A : List[str] = self.get_tokenizer() _A : str = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : Optional[Any] = """lower newer""" _A : Tuple = processor(text=_a ) _A : Optional[int] = tokenizer(_a , return_token_type_ids=_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a__ ( self ) -> Optional[Any]: _A : str = self.get_image_processor() _A : List[str] = self.get_tokenizer() _A : Dict = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : int = """lower newer""" _A : List[str] = self.prepare_image_inputs() _A : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_a ): processor() def a__ ( self ) -> Optional[Any]: _A : List[Any] = self.get_image_processor() _A : Optional[int] = self.get_tokenizer() _A : str = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : Union[str, Any] = [[1, 
4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A : Tuple = processor.batch_decode(_a ) _A : Optional[Any] = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def a__ ( self ) -> Any: _A : List[str] = self.get_image_processor() _A : Any = self.get_tokenizer() _A : Optional[Any] = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : Tuple = """lower newer""" _A : str = self.prepare_image_inputs() _A : Any = processor(text=_a , images=_a ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
26
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits; each `number &= number - 1` clears the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the parity of each successive low bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"

        print(f"Benchmark when {number = }:")

        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
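Both counters above should agree with Python's own popcount; a quick property check over a few values (bin(n).count("1") is used so the check runs on any Python 3 version):

for n in (0, 1, 25, 37, 58, 2**31 - 1):
    expected = bin(n).count("1")
    assert get_set_bits_count_using_brian_kernighans_algorithm(n) == expected
    assert get_set_bits_count_using_modulo_operator(n) == expected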
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the worldometers page and return current worldwide COVID-19 statistics."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
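# A minimal sketch of a concrete command (illustrative, not part of the original file; the
# command name and class are hypothetical). In practice `register_subcommand` receives the
# object returned by `ArgumentParser.add_subparsers()`, on which `add_parser` is available.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # wire the subcommand into the parent CLI and bind a factory to it
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("text", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)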
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        # handling of negative values of initial intensity
        raise ValueError("The value of intensity cannot be negative")
    if angle < 0 or angle > 360:
        # handling of values out of allowed range
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
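# A hedged worked example for the file above (added for illustration, not part of the original):
# Malus's law gives I = I0 * cos^2(theta), so at theta = 60 degrees, cos^2(60) = 0.25 and a
# 100-unit beam passes 25 units through the polarizer.
import math

initial_intensity, angle = 100.0, 60.0
intensity = initial_intensity * (math.cos(math.radians(angle)) ** 2)
assert abs(intensity - 25.0) < 1e-9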
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # sample `num_frames` evenly spaced frame indices from the clip
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
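# A hedged usage sketch for the pipeline above (illustrative, not part of the original file).
# `pipeline("video-classification")` is the public entry point; running this downloads a default
# checkpoint and needs torch and decord installed. The sample clip URL is an assumption, taken
# from what the transformers tests use; any local path works too.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("video-classification")
    predictions = classifier(
        "https://huggingface.co/datasets/nielsr/video-demo/resolve/main/eating_spaghetti.mp4", top_k=3
    )
    print(predictions)  # e.g. [{"score": ..., "label": ...}, ...]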
'''simple docstring''' import datasets from .evaluate import evaluate SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def A__ ( self ) -> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": { """id""": datasets.Value("""string""" ), """prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ), }, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , ) def A__ ( self 
, predictions, references):
        """Compute the CUAD scores (exact_match, f1, aupr, prec_at_80_recall, prec_at_90_recall)."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        # curvature follows from the density parameters summing to one
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the shortest repunit divisible by `divisor` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor not divisible by 5 whose shortest repunit exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
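# A hedged sanity check for least_divisible_repunit above (illustrative, not part of the
# original file): the repunit of length 6, 111111, is the shortest one divisible by 7
# (111111 = 7 * 15873), so least_divisible_repunit(7) returns 6.
assert 111111 % 7 == 0 and all(int("1" * k) % 7 != 0 for k in range(1, 6))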
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
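# A hedged worked example (illustrative, not part of the original file): a 3-4-5 right triangle
# of resistance and reactance yields an impedance of 5 ohms, which is what
# electrical_impedance(3, 4, 0) above returns as {"impedance": 5.0}.
from math import sqrt

resistance, reactance = 3.0, 4.0
assert sqrt(resistance**2 + reactance**2) == 5.0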
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
# Algorithm for the pigeonhole sorting


def pigeonhole_sort(a):
    """Sort the list `a` in place using pigeonhole sort (integers only)."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(x) for x in a]))


if __name__ == "__main__":
    main()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __A : List[Any] = logging.get_logger(__name__) __A : List[Any] = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class _UpperCAmelCase ( _A , _A ): SCREAMING_SNAKE_CASE_ : List[Any] = "resnet" SCREAMING_SNAKE_CASE_ : Tuple = ["basic", "bottleneck"] def __init__( self : Any , A : Tuple=3 , A : str=64 , A : Tuple=[2_56, 5_12, 10_24, 20_48] , A : List[Any]=[3, 4, 6, 3] , A : Union[str, Any]="bottleneck" , A : int="relu" , A : List[Any]=False , A : Tuple=None , A : int=None , **A : List[str] , ) -> List[Any]: super().__init__(**A ) if layer_type not in self.layer_types: raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' ) lowercase_ : List[Any] = num_channels lowercase_ : Tuple = embedding_size lowercase_ : Dict = hidden_sizes lowercase_ : Tuple = depths lowercase_ : Optional[int] = layer_type lowercase_ : str = hidden_act lowercase_ : Dict = downsample_in_first_stage lowercase_ : str = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(A ) + 1 )] lowercase_ , lowercase_ : List[str] = get_aligned_output_features_output_indices( out_features=A , out_indices=A , stage_names=self.stage_names ) class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : str = version.parse("1.11" ) @property def A ( self : Any ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def A ( self : Union[str, Any] ) -> float: return 1e-3
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        # copy each destination pixel from its nearest source pixel
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Get the corresponding source x coordinate for a destination x coordinate."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Get the corresponding source y coordinate for a destination y coordinate."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output)
    waitKey(0)
    destroyAllWindows()
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` on the interval [a, b], pushing pending lazy values down first."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum on the interval [a, b], pushing pending lazy values down first."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by the given level."""

    def brightness(c: int) -> float:
        # per-channel transformation applied to every pixel
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c of a Pythagorean triplet (a, b, c) with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
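# A hedged sanity check (illustrative, not part of the original file): for n = 12 the only
# Pythagorean triplet with a + b + c == 12 is (3, 4, 5), so solution(12) should return 60.
a, b, c = 3, 4, 5
assert a + b + c == 12 and a * a + b * b == c * c and a * b * c == 60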
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}


# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)


def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
import argparse
import struct
import unittest


class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # pad the message to a multiple of 64 bytes and append its bit length
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000

                # Compression
                S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + S1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by the given number of rotations."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
from manim import *


class Stage3(Scene):
    # Reconstructed from the obfuscated source; the arrangement directions (UP/RIGHT/DOWN) and
    # fill colors are inferred from the scene's "Key" legend (yellow = empty model, blue = checkpoint).
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])

        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText("Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(step_2, checkpoint_rect, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
37
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: sentencepiece_model.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder


# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
321
0
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursion: try every first cut of length i and recurse on the rest."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Top-down dynamic programming: memoize the naive recursion in `max_rev`."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue

        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Bottom-up dynamic programming: fill `max_rev` from shorter rods to longer ones."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Raise ValueError if `n` is negative or exceeds the number of listed prices."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
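# A minimal usage sketch (the price table below is the classic CLRS example and is
# illustrative, not part of the original module): prices[i] is the price of a rod
# piece of length i + 1, and all the strategies above agree on the optimal revenue.
example_prices = [1, 5, 8, 9, 10, 17, 17, 20]
print(bottom_up_cut_rod(4, example_prices))  # 10: best cut is 2 + 2 (5 + 5)
print(top_down_cut_rod(8, example_prices))   # 22: best cut is 2 + 6 (5 + 17)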
38
"""Ideal gas law: PV = nRT, solved for pressure and for volume."""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure in pascals of `moles` of gas at `kelvin` in `volume` m^3."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume in m^3 of `moles` of gas at `kelvin` under `pressure` pascals."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
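# A worked example (values are illustrative): by PV = nRT, one mole at 0 °C
# (273.15 K) held in 22.4 L (0.0224 m^3) exerts roughly one atmosphere, since
# 1 * 8.314462 * 273.15 / 0.0224 ≈ 101,388 Pa.
print(pressure_of_gas_system(1.0, 273.15, 0.0224))   # ~101388.0 (Pa)
print(volume_of_gas_system(1.0, 273.15, 101_325.0))  # ~0.0224 (m^3)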
321
0
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# NOTE: class and method names below are reconstructed; the obfuscated source collapsed
# them all to the same identifier, which is invalid for a unittest class.
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_2_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
39
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. `s3://`) from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True if `fs` is a remote (non-local) fsspec filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str):
    """Rename `src` to `dst`, using a plain move when the filesystem is local."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's async state so it can be safely re-created, e.g. after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
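# A minimal usage sketch of the path helper (the s3:// URI is illustrative):
# paths without a protocol pass through untouched, while a protocol prefix
# such as "s3://" is stripped before the path is handed to the filesystem.
assert extract_path_from_uri("/data/train.parquet") == "/data/train.parquet"
assert extract_path_from_uri("s3://my-bucket/data/train.parquet") == "my-bucket/data/train.parquet"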
321
0
"""simple docstring""" import argparse import os import re __lowercase = """src/transformers/models/auto""" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __lowercase = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""") # re pattern that matches identifiers in mappings __lowercase = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""") def lowercase ( A_ , A_ = False )-> Dict: '''simple docstring''' with open(A_ , "r" , encoding="utf-8" ) as f: a : List[str] = f.read() a : Union[str, Any] = content.split("\n" ) a : Any = [] a : Tuple = 0 while line_idx < len(A_ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: a : str = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 a : int = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": a : Any = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers a : Tuple = sorted(A_ , key=lambda A_ : _re_identifier.search(A_ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(A_ , "w" , encoding="utf-8" ) as f: f.write("\n".join(A_ ) ) elif "\n".join(A_ ) != content: return True def lowercase ( A_ = False )-> List[str]: '''simple docstring''' a : Dict = [os.path.join(A_ , A_ ) for f in os.listdir(A_ ) if f.endswith(".py" )] a : int = [sort_auto_mapping(A_ , overwrite=A_ ) for fname in fnames] if not overwrite and any(A_ ): a : Any = [f for f, d in zip(A_ , A_ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {", ".join(A_ )}. Run `make style` to fix''' " this." ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") __lowercase = parser.parse_args() sort_all_auto_mappings(not args.check_only)
40
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
321
0