"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase_ ( _snake_case ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" ) SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" ) SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" ) SCREAMING_SNAKE_CASE__ : Tuple = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE__ : Optional[Any] = value return upgrade @torch.no_grad() def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ): if config_path is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case ) else: SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig() SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval() SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case ) if os.path.exists(_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ) SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case ) hf_model.load_state_dict(_snake_case ) SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict() SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case ) SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case ) assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = 
argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
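
# A minimal direct-call sketch of the converter above; the two checkpoint paths and the
# output folder are hypothetical placeholders, not real artifact locations.
convert_flava_checkpoint(
    checkpoint_path="./flava_full.pt",  # hypothetical original FLAVA checkpoint (local path or URL)
    codebook_path="./flava_codebook.pt",  # hypothetical DALL-E codebook checkpoint
    pytorch_dump_folder_path="./flava-hf",  # where the converted model is saved
)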
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) UpperCAmelCase__ : List[Any] = logging.getLogger() def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case ) Path(_snake_case ).open("""w""" ).writelines(_snake_case ) UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random' UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart' UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowerCAmelCase_ (a__ ): """simple docstring""" def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""] _dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : Optional[Any] = F''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): run_generate() assert Path(SCREAMING_SNAKE_CASE__ ).exists() # os.remove(Path(output_file_name)) def __magic_name__ (self ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : Any = { """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""], """de""": [ """Maschinelles Lernen ist großartig, oder?""", """Ich esse gerne Bananen""", """Morgen ist wieder ein toller Tag!""", ], } SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] ) SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : List[Any] = F''' run_eval_search.py {model} 
{str(SCREAMING_SNAKE_CASE__ )} {str(SCREAMING_SNAKE_CASE__ )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] ) with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): with CaptureStdout() as cs: run_search() SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""] SCREAMING_SNAKE_CASE__ : Any = ["""Info"""] if "translation" in task: expected_strings.append("""bleu""" ) else: expected_strings.extend(SCREAMING_SNAKE_CASE__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(SCREAMING_SNAKE_CASE__ ).exists() os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
"""simple docstring""" from typing import Dict, Optional import numpy as np import datasets UpperCAmelCase__ : int = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' UpperCAmelCase__ : List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' UpperCAmelCase__ : Union[str, Any] = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = False ,): if label_map is not None: for old_id, new_id in label_map.items(): SCREAMING_SNAKE_CASE__ : Dict = new_id # turn into Numpy arrays SCREAMING_SNAKE_CASE__ : Dict = np.array(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = np.array(_snake_case ) if reduce_labels: SCREAMING_SNAKE_CASE__ : Dict = 255 SCREAMING_SNAKE_CASE__ : Dict = label - 1 SCREAMING_SNAKE_CASE__ : int = 255 SCREAMING_SNAKE_CASE__ : Optional[int] = label != ignore_index SCREAMING_SNAKE_CASE__ : Dict = np.not_equal(_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[int] = pred_label[mask] SCREAMING_SNAKE_CASE__ : Any = np.array(_snake_case )[mask] SCREAMING_SNAKE_CASE__ : Tuple = pred_label[pred_label == label] SCREAMING_SNAKE_CASE__ : str = np.histogram(_snake_case ,bins=_snake_case ,range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : Any = np.histogram(_snake_case ,bins=_snake_case ,range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : List[str] = np.histogram(_snake_case ,bins=_snake_case ,range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.zeros((num_labels,) ,dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros((num_labels,) ,dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.zeros((num_labels,) ,dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.zeros((num_labels,) ,dtype=np.floataa ) for result, gt_seg_map in zip(_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = intersect_and_union( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = total_intersect_and_union( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) # compute metrics SCREAMING_SNAKE_CASE__ : Optional[Any] = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum() SCREAMING_SNAKE_CASE__ : Dict = total_area_intersect / total_area_union SCREAMING_SNAKE_CASE__ : Optional[Any] = total_area_intersect / total_area_label SCREAMING_SNAKE_CASE__ : int = np.nanmean(_snake_case ) SCREAMING_SNAKE_CASE__ : Dict = np.nanmean(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = all_acc SCREAMING_SNAKE_CASE__ : List[Any] = iou SCREAMING_SNAKE_CASE__ : Tuple = acc if nan_to_num is not None: SCREAMING_SNAKE_CASE__ : Any = {metric: 
np.nan_to_num(_snake_case ,nan=_snake_case ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ (datasets.Metric ): """simple docstring""" def __magic_name__ (self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ), """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ), } ) , reference_urls=[ """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py""" ] , ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = mean_iou( results=SCREAMING_SNAKE_CASE__ , gt_seg_maps=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , ignore_index=SCREAMING_SNAKE_CASE__ , nan_to_num=SCREAMING_SNAKE_CASE__ , label_map=SCREAMING_SNAKE_CASE__ , reduce_labels=SCREAMING_SNAKE_CASE__ , ) return iou_result
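
# The metric can also be called directly, bypassing datasets.load_metric; a small
# sketch with toy arrays (the values and shapes here are illustrative only).
toy_predicted = [np.array([[1, 2], [3, 4]])]
toy_ground_truth = [np.array([[1, 2], [3, 3]])]
toy_results = mean_iou(results=toy_predicted, gt_seg_maps=toy_ground_truth, num_labels=5, ignore_index=255)
print(toy_results["mean_iou"], toy_results["per_category_iou"])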
"""simple docstring""" UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}] UpperCAmelCase__ : Optional[int] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase__ : List[str] = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCAmelCase__ : List[Any] = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = SavedModel() SCREAMING_SNAKE_CASE__ : Dict = [] with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f: SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(_snake_case )] ) with open(_snake_case ,"""rb""" ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE__ : List[str] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(_snake_case ) if strict and len(_snake_case ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(_snake_case ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*_snake_case ,sep="""\n""" ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) UpperCAmelCase__ : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
"""simple docstring""" def lowercase_ ( _snake_case ): if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_snake_case ,_snake_case ): raise TypeError("""Input value must be a 'int' type""" ) return bin(_snake_case ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa UpperCAmelCase__ : Any = logging.getLogger(__name__) class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : int = '''summarization''' __UpperCamelCase : Tuple = ['''loss'''] __UpperCamelCase : List[Any] = ROUGE_KEYS __UpperCamelCase : Union[str, Any] = '''rouge2''' def __init__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , mode=self.mode , **SCREAMING_SNAKE_CASE__ ) use_task_specific_params(self.model , """summarization""" ) save_git_info(self.hparams.output_dir ) SCREAMING_SNAKE_CASE__ : int = Path(self.output_dir ) / """metrics.json""" SCREAMING_SNAKE_CASE__ : List[str] = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams , self.hparams_save_path ) SCREAMING_SNAKE_CASE__ : Tuple = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = self.config.model_type SCREAMING_SNAKE_CASE__ : int = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size SCREAMING_SNAKE_CASE__ : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } SCREAMING_SNAKE_CASE__ : Tuple = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } SCREAMING_SNAKE_CASE__ : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} SCREAMING_SNAKE_CASE__ : int = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 
get_git_info()["""repo_sha"""] SCREAMING_SNAKE_CASE__ : Optional[Any] = hparams.num_workers SCREAMING_SNAKE_CASE__ : Tuple = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang] SCREAMING_SNAKE_CASE__ : Dict = self.decoder_start_token_id SCREAMING_SNAKE_CASE__ : Tuple = ( SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = False SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: SCREAMING_SNAKE_CASE__ : List[str] = self.hparams.eval_max_gen_length else: SCREAMING_SNAKE_CASE__ : int = self.model.config.max_length SCREAMING_SNAKE_CASE__ : Dict = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict[str, List[str]]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(SCREAMING_SNAKE_CASE__ , Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" ) SCREAMING_SNAKE_CASE__ : int = True return readable_batch def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" return self.model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.tokenizer.batch_decode( SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return lmap(str.strip , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch["""input_ids"""], batch["""attention_mask"""] SCREAMING_SNAKE_CASE__ : Optional[Any] = batch["""labels"""] if isinstance(self.model , SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model._shift_right(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero SCREAMING_SNAKE_CASE__ : Tuple = decoder_input_ids self.save_readable_batch(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = self(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id SCREAMING_SNAKE_CASE__ : Any = nn.CrossEntropyLoss(ignore_index=SCREAMING_SNAKE_CASE__ ) assert lm_logits.shape[-1] == self.vocab_size SCREAMING_SNAKE_CASE__ : int = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = nn.functional.log_softmax(SCREAMING_SNAKE_CASE__ , dim=-1 ) 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = label_smoothed_nll_loss( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.hparams.label_smoothing , ignore_index=SCREAMING_SNAKE_CASE__ ) return (loss,) @property def __magic_name__ (self ) -> int: """simple docstring""" return self.tokenizer.pad_token_id def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self._step(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = dict(zip(self.loss_names , SCREAMING_SNAKE_CASE__ ) ) # tokens per batch SCREAMING_SNAKE_CASE__ : List[Any] = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() SCREAMING_SNAKE_CASE__ : str = batch["""input_ids"""].shape[0] SCREAMING_SNAKE_CASE__ : List[str] = batch["""input_ids"""].eq(self.pad ).sum() SCREAMING_SNAKE_CASE__ : List[Any] = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" return self._generative_step(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="val" ) -> Dict: """simple docstring""" self.step_count += 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} SCREAMING_SNAKE_CASE__ : Tuple = losses["""loss"""] SCREAMING_SNAKE_CASE__ : List[str] = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } SCREAMING_SNAKE_CASE__ : Tuple = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) SCREAMING_SNAKE_CASE__ : torch.FloatTensor = torch.tensor(SCREAMING_SNAKE_CASE__ ).type_as(SCREAMING_SNAKE_CASE__ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()} SCREAMING_SNAKE_CASE__ : Tuple = self.step_count self.metrics[prefix].append(SCREAMING_SNAKE_CASE__ ) # callback writes this to self.metrics_save_path SCREAMING_SNAKE_CASE__ : int = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'''{prefix}_loss''': loss, F'''{prefix}_{self.val_metric}''': metric_tensor, } def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" return calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') SCREAMING_SNAKE_CASE__ : int = self.model.generate( batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = (time.time() - ta) / batch["""input_ids"""].shape[0] SCREAMING_SNAKE_CASE__ : List[str] = self.ids_to_clean_text(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = self.ids_to_clean_text(batch["""labels"""] ) SCREAMING_SNAKE_CASE__ : str = self._step(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 
dict(zip(self.loss_names , SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : Dict = self.calc_generative_metrics(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = np.mean(lmap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) base_metrics.update(gen_time=SCREAMING_SNAKE_CASE__ , gen_len=SCREAMING_SNAKE_CASE__ , preds=SCREAMING_SNAKE_CASE__ , target=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) return base_metrics def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" return self._generative_step(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" return self.validation_epoch_end(SCREAMING_SNAKE_CASE__ , prefix="""test""" ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> SeqaSeqDataset: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.n_obs[type_path] SCREAMING_SNAKE_CASE__ : Tuple = self.target_lens[type_path] SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dataset_class( self.tokenizer , type_path=SCREAMING_SNAKE_CASE__ , n_obs=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , **self.dataset_kwargs , ) return dataset def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> DataLoader: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_dataset(SCREAMING_SNAKE_CASE__ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": SCREAMING_SNAKE_CASE__ : Tuple = dataset.make_sortish_sampler(SCREAMING_SNAKE_CASE__ , distributed=self.hparams.gpus > 1 ) return DataLoader( SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , shuffle=SCREAMING_SNAKE_CASE__ , num_workers=self.num_workers , sampler=SCREAMING_SNAKE_CASE__ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": SCREAMING_SNAKE_CASE__ : List[Any] = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( SCREAMING_SNAKE_CASE__ , batch_sampler=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , shuffle=SCREAMING_SNAKE_CASE__ , num_workers=self.num_workers , sampler=SCREAMING_SNAKE_CASE__ , ) def __magic_name__ (self ) -> DataLoader: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=SCREAMING_SNAKE_CASE__ ) return dataloader def __magic_name__ (self ) -> DataLoader: """simple docstring""" return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size ) def __magic_name__ (self ) -> DataLoader: """simple docstring""" return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size ) @staticmethod def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) add_generic_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) parser.add_argument( """--max_source_length""" , default=10_24 , type=SCREAMING_SNAKE_CASE__ , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--max_target_length""" , default=56 , type=SCREAMING_SNAKE_CASE__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--val_max_target_length""" , default=1_42 , type=SCREAMING_SNAKE_CASE__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--test_max_target_length""" , default=1_42 , type=SCREAMING_SNAKE_CASE__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument("""--freeze_encoder""" , action="""store_true""" ) parser.add_argument("""--freeze_embeds""" , action="""store_true""" ) parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--max_tokens_per_batch""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--logger_name""" , type=SCREAMING_SNAKE_CASE__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" ) parser.add_argument("""--n_train""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" , type=SCREAMING_SNAKE_CASE__ , default=5_00 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" , type=SCREAMING_SNAKE_CASE__ , default="""summarization""" , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--label_smoothing""" , type=SCREAMING_SNAKE_CASE__ , default=0.0 , required=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--src_lang""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , required=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--tgt_lang""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , required=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--eval_beams""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ ) parser.add_argument( """--val_metric""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" , type=SCREAMING_SNAKE_CASE__ , default=1 , required=SCREAMING_SNAKE_CASE__ , help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So""" """ val_check_interval will effect it.""" ) , ) return parser class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[Any] = '''translation''' __UpperCamelCase : List[Any] = ['''loss'''] __UpperCamelCase : Optional[Any] = ['''bleu'''] __UpperCamelCase : Dict = '''bleu''' def __init__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = hparams.src_lang SCREAMING_SNAKE_CASE__ : Optional[Any] = hparams.tgt_lang def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> dict: """simple docstring""" return calculate_bleu(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowercase_ ( _snake_case ,_snake_case=None ): Path(args.output_dir ).mkdir(exist_ok=_snake_case ) check_output_dir(_snake_case ,expected_items=3 ) if model is None: if "summarization" in args.task: SCREAMING_SNAKE_CASE__ : SummarizationModule = SummarizationModule(_snake_case ) else: SCREAMING_SNAKE_CASE__ : SummarizationModule = TranslationModule(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): SCREAMING_SNAKE_CASE__ : int = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger SCREAMING_SNAKE_CASE__ : Dict = os.environ.get("""WANDB_PROJECT""" ,_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[int] = WandbLogger(name=model.output_dir.name ,project=_snake_case ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger SCREAMING_SNAKE_CASE__ : List[Any] = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience ) else: SCREAMING_SNAKE_CASE__ : List[Any] = False SCREAMING_SNAKE_CASE__ : int = args.val_metric == """loss""" SCREAMING_SNAKE_CASE__ : pl.Trainer = generic_train( _snake_case ,_snake_case ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback( args.output_dir ,model.val_metric ,args.save_top_k ,_snake_case ) ,early_stopping_callback=_snake_case ,logger=_snake_case ,) pickle_save(model.hparams ,model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model SCREAMING_SNAKE_CASE__ : Dict = """""" SCREAMING_SNAKE_CASE__ : List[Any] = sorted(glob.glob(os.path.join(args.output_dir ,"""*.ckpt""" ) ,recursive=_snake_case ) ) if checkpoints: SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoints[-1] SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": UpperCAmelCase__ : Tuple = argparse.ArgumentParser() UpperCAmelCase__ : Optional[int] = pl.Trainer.add_argparse_args(parser) UpperCAmelCase__ : int = SummarizationModule.add_model_specific_args(parser, os.getcwd()) UpperCAmelCase__ : List[str] = parser.parse_args() main(args)
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCAmelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a__ ) class lowerCAmelCase_ (a__ ): """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) requires_backends(self , """vision""" ) self.check_model_type(SCREAMING_SNAKE_CASE__ ) def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" return {}, {}, {} def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = image.size SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework ) return model_inputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ ) return model_outputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy() SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" ) SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = {} SCREAMING_SNAKE_CASE__ : Any = predicted_depth SCREAMING_SNAKE_CASE__ : Dict = depth return output_dict
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ : List[Any] = logging.get_logger(__name__) def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = DPTConfig() if "large" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Any = 1_024 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4_096 SCREAMING_SNAKE_CASE__ : Tuple = 24 SCREAMING_SNAKE_CASE__ : Any = 16 SCREAMING_SNAKE_CASE__ : Any = [5, 11, 17, 23] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [256, 512, 1_024, 1_024] SCREAMING_SNAKE_CASE__ : Union[str, Any] = (1, 384, 384) if "ade" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Optional[int] = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = 150 SCREAMING_SNAKE_CASE__ : int = """huggingface/label-files""" SCREAMING_SNAKE_CASE__ : int = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE__ : Tuple = json.load(open(cached_download(hf_hub_url(_snake_case ,_snake_case ,repo_type="""dataset""" ) ) ,"""r""" ) ) SCREAMING_SNAKE_CASE__ : List[Any] = {int(_snake_case ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : Optional[int] = idalabel SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : Optional[Any] = [1, 150, 480, 480] return config, expected_shape def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(_snake_case ,_snake_case ) def lowercase_ ( _snake_case ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""pretrained.model""" ,"""dpt.encoder""" ) if "pretrained.model" in name: SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""pretrained.model""" ,"""dpt.embeddings""" ) if "patch_embed" in name: SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""patch_embed""" ,"""patch_embeddings""" ) if "pos_embed" in name: SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""pos_embed""" ,"""position_embeddings""" ) if "attn.proj" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""attn.proj""" ,"""attention.output.dense""" ) if "proj" in name and "project" not in name: SCREAMING_SNAKE_CASE__ : int = name.replace("""proj""" ,"""projection""" ) if "blocks" in name: SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""blocks""" ,"""layer""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""mlp.fc1""" ,"""intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE__ : Any = name.replace("""mlp.fc2""" ,"""output.dense""" ) if "norm1" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""norm1""" ,"""layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE__ : str = name.replace("""norm2""" ,"""layernorm_after""" ) if "scratch.output_conv" in name: SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""scratch.output_conv""" ,"""head""" ) if "scratch" in name: SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""scratch""" ,"""neck""" ) if "layer1_rn" in name: SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""layer1_rn""" ,"""convs.0""" ) if "layer2_rn" in name: SCREAMING_SNAKE_CASE__ : List[str] = 
name.replace("""layer2_rn""" ,"""convs.1""" ) if "layer3_rn" in name: SCREAMING_SNAKE_CASE__ : Any = name.replace("""layer3_rn""" ,"""convs.2""" ) if "layer4_rn" in name: SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""layer4_rn""" ,"""convs.3""" ) if "refinenet" in name: SCREAMING_SNAKE_CASE__ : List[str] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 SCREAMING_SNAKE_CASE__ : List[Any] = name.replace(f'''refinenet{layer_idx}''' ,f'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""out_conv""" ,"""projection""" ) if "resConfUnit1" in name: SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""resConfUnit1""" ,"""residual_layer1""" ) if "resConfUnit2" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""resConfUnit2""" ,"""residual_layer2""" ) if "conv1" in name: SCREAMING_SNAKE_CASE__ : str = name.replace("""conv1""" ,"""convolution1""" ) if "conv2" in name: SCREAMING_SNAKE_CASE__ : str = name.replace("""conv2""" ,"""convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: SCREAMING_SNAKE_CASE__ : str = name.replace("""pretrained.act_postprocess1.0.project.0""" ,"""neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: SCREAMING_SNAKE_CASE__ : Dict = name.replace("""pretrained.act_postprocess2.0.project.0""" ,"""neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.0.project.0""" ,"""neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" ,"""neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""pretrained.act_postprocess1.3""" ,"""neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.4""" ,"""neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""pretrained.act_postprocess2.3""" ,"""neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""pretrained.act_postprocess2.4""" ,"""neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: SCREAMING_SNAKE_CASE__ : str = name.replace("""pretrained.act_postprocess3.3""" ,"""neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""pretrained.act_postprocess4.3""" ,"""neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""pretrained.act_postprocess4.4""" ,"""neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""pretrained""" ,"""dpt""" ) if "bn" in name: SCREAMING_SNAKE_CASE__ : Tuple = name.replace("""bn""" ,"""batch_norm""" ) if "head" in name: SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""head""" ,"""head.head""" ) if 
"encoder.norm" in name: SCREAMING_SNAKE_CASE__ : str = name.replace("""encoder.norm""" ,"""layernorm""" ) if "auxlayer" in name: SCREAMING_SNAKE_CASE__ : int = name.replace("""auxlayer""" ,"""auxiliary_head.head""" ) return name def lowercase_ ( _snake_case ,_snake_case ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[str] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : str = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[: config.hidden_size, :] SCREAMING_SNAKE_CASE__ : Any = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE__ : Dict = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-config.hidden_size :] def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ) return im @torch.no_grad() def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_dpt_config(_snake_case ) # load original state_dict from URL SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(_snake_case ) # rename keys for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ : Tuple = state_dict.pop(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = val # read in qkv matrices read_in_q_k_v(_snake_case ,_snake_case ) # load HuggingFace model SCREAMING_SNAKE_CASE__ : Optional[Any] = DPTForSemanticSegmentation(_snake_case ) if """ade""" in checkpoint_url else DPTForDepthEstimation(_snake_case ) model.load_state_dict(_snake_case ) model.eval() # Check outputs on an image SCREAMING_SNAKE_CASE__ : Tuple = 480 if """ade""" in checkpoint_url else 384 SCREAMING_SNAKE_CASE__ : str = DPTImageProcessor(size=_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = prepare_img() SCREAMING_SNAKE_CASE__ : Dict = image_processor(_snake_case ,return_tensors="""pt""" ) # forward pass SCREAMING_SNAKE_CASE__ : Optional[int] = model(**_snake_case ).logits if """ade""" in checkpoint_url else model(**_snake_case ).predicted_depth # Assert logits SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ) if "ade" in checkpoint_url: SCREAMING_SNAKE_CASE__ : int = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ) assert outputs.shape == torch.Size(_snake_case ) assert ( torch.allclose(outputs[0, 0, :3, :3] ,_snake_case ,atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] ,_snake_case ) ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( 
repo_path_or_name=Path(_snake_case ,_snake_case ) ,organization="""nielsr""" ,commit_message="""Add model""" ,use_temp_dir=_snake_case ,) image_processor.push_to_hub( repo_path_or_name=Path(_snake_case ,_snake_case ) ,organization="""nielsr""" ,commit_message="""Add image processor""" ,use_temp_dir=_snake_case ,) if __name__ == "__main__": UpperCAmelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) UpperCAmelCase__ : Any = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
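
# A direct-call sketch of the converter, reusing the default checkpoint URL from the
# argparser above; the output folder is a hypothetical placeholder.
convert_dpt_checkpoint(
    checkpoint_url="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
    pytorch_dump_folder_path="./dpt-large",  # hypothetical output directory
    push_to_hub=False,
    model_name="dpt-large",
)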
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = IFPipeline __UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" return self._get_dummy_components() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]: """simple docstring""" if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def __magic_name__ (self ) -> List[str]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_local() def __magic_name__ (self ) -> Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ (self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : int = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) 
).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowercase_ ( ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
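# Editor-added sketch (illustrative, requires a CUDA device): the helper
# defined above resets the CUDA allocator statistics so that
# torch.cuda.max_memory_allocated() reports the peak for a single pipeline
# call. The pattern used throughout these tests is, schematically:
#
#   _start_torch_memory_measurement()
#   output = pipe(prompt_embeds=..., num_inference_steps=2, output_type="np")
#   mem_bytes = torch.cuda.max_memory_allocated()
#   assert mem_bytes < 13 * 10**9  # bound taken from the stage-I test above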
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase__ : str = { 'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json', 'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json', 'uclanlp/visualbert-vqa-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json' ), 'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json', 'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json', 'uclanlp/visualbert-vcr-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json' ), 'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json', 'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json', 'uclanlp/visualbert-nlvr2-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json' ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Any = '''visual_bert''' def __init__(self , SCREAMING_SNAKE_CASE__=3_05_22 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings SCREAMING_SNAKE_CASE__ : str = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = visual_embedding_dim SCREAMING_SNAKE_CASE__ : str = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : Optional[int] = type_vocab_size SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps SCREAMING_SNAKE_CASE__ : Optional[Any] = bypass_transformer SCREAMING_SNAKE_CASE__ : List[str] = special_visual_initialize
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) SCREAMING_SNAKE_CASE__ : int = Accelerator() SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ ) try: pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Tuple = logging.get_logger(__name__) UpperCAmelCase__ : Union[str, Any] = { 'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : List[Any] = '''vit_mae''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=20_48 , SCREAMING_SNAKE_CASE__=0.75 , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = hidden_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Dict = initializer_range SCREAMING_SNAKE_CASE__ : Any = layer_norm_eps SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : str = patch_size SCREAMING_SNAKE_CASE__ : List[Any] = num_channels SCREAMING_SNAKE_CASE__ : Union[str, Any] = qkv_bias SCREAMING_SNAKE_CASE__ : Any = decoder_num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_hidden_size SCREAMING_SNAKE_CASE__ : List[str] = decoder_num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_intermediate_size SCREAMING_SNAKE_CASE__ : str = mask_ratio SCREAMING_SNAKE_CASE__ : List[Any] = norm_pix_loss
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) SCREAMING_SNAKE_CASE__ : int = [] # custom device map if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_snake_case ) # compatibility with peft SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) # convert param to the right dtype SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" ) SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_snake_case ): param.to(_snake_case ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers( _snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map( _snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE__ : Tuple = True SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,) return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ): if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized. """ """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_snake_case ,_snake_case ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE__ : int = get_balanced_memory( _snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,) SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case ) if isinstance(_snake_case ,_snake_case ): # check that we don't have any quantized modules on the CPU SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE__ : Optional[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ): if modules_to_not_convert is None: SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,): SCREAMING_SNAKE_CASE__ : Tuple = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE__ : Any = [] current_key_name.append(_snake_case ) if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE__ : List[str] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt( module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit( module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) SCREAMING_SNAKE_CASE__ : str = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data bnb_module.requires_grad_(_snake_case ) setattr(_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowercase_ ( _snake_case ): # Create a copy of the model with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case ) # For compatibility with Accelerate < 0.18 if isinstance(_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] ) SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE__ : Optional[int] = False if hasattr(_snake_case 
,"""base_model_prefix""" ): SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" ) filtered_module_names.append(_snake_case ) return filtered_module_names def lowercase_ ( _snake_case ): for m in model.modules(): if isinstance(_snake_case ,bnb.nn.Linearabit ): return True return False def lowercase_ ( _snake_case ): return next(parameter.parameters() ).device def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case ) SCREAMING_SNAKE_CASE__ : str = param_name SCREAMING_SNAKE_CASE__ : Dict = model if "." in tensor_name: SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" ) for split in splits[:-1]: SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1] # offload weights SCREAMING_SNAKE_CASE__ : List[Any] = False offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case ) if hasattr(module._parameters[tensor_name] ,"""SCB""" ): offload_weight( module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,) else: offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case ) offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ) set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
"""simple docstring""" UpperCAmelCase__ : Tuple = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) UpperCAmelCase__ : str = frozenset(['prompt', 'negative_prompt']) UpperCAmelCase__ : List[Any] = frozenset([]) UpperCAmelCase__ : List[Any] = frozenset(['image']) UpperCAmelCase__ : Any = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) UpperCAmelCase__ : List[str] = frozenset(['image']) UpperCAmelCase__ : int = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) UpperCAmelCase__ : Dict = frozenset(['prompt', 'image', 'negative_prompt']) UpperCAmelCase__ : Optional[int] = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) UpperCAmelCase__ : Optional[int] = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) UpperCAmelCase__ : Optional[int] = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) UpperCAmelCase__ : Union[str, Any] = frozenset(['image', 'mask_image']) UpperCAmelCase__ : Optional[int] = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) UpperCAmelCase__ : Tuple = frozenset(['example_image', 'image', 'mask_image']) UpperCAmelCase__ : Optional[int] = frozenset(['class_labels']) UpperCAmelCase__ : List[Any] = frozenset(['class_labels']) UpperCAmelCase__ : str = frozenset(['batch_size']) UpperCAmelCase__ : Any = frozenset([]) UpperCAmelCase__ : List[Any] = frozenset(['batch_size']) UpperCAmelCase__ : Union[str, Any] = frozenset([]) UpperCAmelCase__ : Dict = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) UpperCAmelCase__ : Union[str, Any] = frozenset(['prompt', 'negative_prompt']) UpperCAmelCase__ : Tuple = frozenset(['input_tokens']) UpperCAmelCase__ : List[Any] = frozenset(['input_tokens'])
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )): raise ValueError("""longest_common_substring() takes two strings for inputs""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case ) SCREAMING_SNAKE_CASE__ : int = len(_snake_case ) SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 for i in range(1 ,texta_length + 1 ): for j in range(1 ,texta_length + 1 ): if texta[i - 1] == texta[j - 1]: SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: SCREAMING_SNAKE_CASE__ : List[Any] = i SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import functools def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = len(_snake_case ) SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) @functools.cache def min_distance(_snake_case ,_snake_case ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa SCREAMING_SNAKE_CASE__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 ,_snake_case ) ,1 + min_distance(_snake_case ,indexa + 1 ) ,diff + min_distance(indexa + 1 ,indexa + 1 ) ,) return min_distance(0 ,0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ): SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """""" # apply OCR SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""] # filter empty words and corresponding coordinates SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()] SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format SCREAMING_SNAKE_CASE__ : List[Any] = [] for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h] actual_boxes.append(_snake_case ) # finally, normalize the bounding boxes SCREAMING_SNAKE_CASE__ : List[str] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) ) assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = ['''pixel_values'''] def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24} SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize 
SCREAMING_SNAKE_CASE__ : Any = size SCREAMING_SNAKE_CASE__ : List[Any] = resample SCREAMING_SNAKE_CASE__ : Dict = apply_ocr SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""]) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if apply_ocr: requires_backends(self , """pytesseract""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] SCREAMING_SNAKE_CASE__ : Dict = [] for image in images: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) words_batch.append(SCREAMING_SNAKE_CASE__ ) boxes_batch.append(SCREAMING_SNAKE_CASE__ ) if do_resize: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ ) if apply_ocr: SCREAMING_SNAKE_CASE__ : List[Any] = words_batch SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch return data
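# Editor-added usage sketch (illustrative; requires Pillow and pytesseract).
# Upstream, an image processor with this resize + OCR + BGR-flip pipeline is
# LayoutLMv2's, so the class name below is an assumption.
#
#   from PIL import Image
#   from transformers import LayoutLMv2ImageProcessor
#
#   processor = LayoutLMv2ImageProcessor(apply_ocr=True)
#   encoding = processor(Image.open("document.png"), return_tensors="pt")
#   print(encoding["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults
#   # with apply_ocr=True the batch also carries "words" and "boxes"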
"""simple docstring""" import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def lowercase_ ( _snake_case ,_snake_case ): assert isinstance(_snake_case ,_snake_case ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("""keep_in_memory""" ,[False, True] ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / """cache""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE__ : Optional[int] = SqlDatasetReader( """dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_snake_case ,keep_in_memory=_snake_case ).read() _check_sql_dataset(_snake_case ,_snake_case ) @require_sqlalchemy @pytest.mark.parametrize( """features""" ,[ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] ,) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Any = tmp_path / """cache""" SCREAMING_SNAKE_CASE__ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} SCREAMING_SNAKE_CASE__ : Dict = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE__ : Any = ( Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE__ : int = SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,features=_snake_case ,cache_dir=_snake_case ).read() _check_sql_dataset(_snake_case ,_snake_case ) def lowercase_ ( _snake_case ): with contextlib.closing(sqlitea.connect(_snake_case ) ) as con: SCREAMING_SNAKE_CASE__ : Optional[int] = con.cursor() cur.execute("""SELECT * FROM dataset""" ) for row in cur: yield row @require_sqlalchemy def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / """cache""" SCREAMING_SNAKE_CASE__ : Any = os.path.join(_snake_case ,"""tmp.sql""" ) SCREAMING_SNAKE_CASE__ : Dict = SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_snake_case ).read() SqlDatasetWriter(_snake_case ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=1 ).write() SCREAMING_SNAKE_CASE__ : List[str] = iter_sql_file(_snake_case ) SCREAMING_SNAKE_CASE__ : int = iter_sql_file(_snake_case ) for rowa, rowa in zip(_snake_case ,_snake_case ): assert rowa == rowa @require_sqlalchemy def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / """cache""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(_snake_case ,"""tmp.sql""" ) SCREAMING_SNAKE_CASE__ : int = SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_snake_case 
).read() SqlDatasetWriter(_snake_case ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=2 ).write() SCREAMING_SNAKE_CASE__ : Any = iter_sql_file(_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = iter_sql_file(_snake_case ) for rowa, rowa in zip(_snake_case ,_snake_case ): assert rowa == rowa @require_sqlalchemy def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / """cache""" SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(_snake_case ,"""tmp.sql""" ) SCREAMING_SNAKE_CASE__ : Any = SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_snake_case ).read() with pytest.raises(_snake_case ): SqlDatasetWriter(_snake_case ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=0 ).write()
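# Editor-added sketch (illustrative): the public counterparts of the reader
# and writer exercised above are Dataset.from_sql / Dataset.to_sql in the
# `datasets` library; the table and file names below are only examples.
#
#   import sqlite3
#   from datasets import Dataset
#
#   con = sqlite3.connect("demo.db")
#   con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
#   con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", [("a", 0, 0.0), ("b", 1, 1.0)])
#   con.commit(); con.close()
#
#   ds = Dataset.from_sql("dataset", "sqlite:///demo.db")
#   ds.to_sql("dataset_copy", "sqlite:///demo.db")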
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:] SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() SCREAMING_SNAKE_CASE__ : int = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB ) # Add 0 to make lengths equal a power of 2 SCREAMING_SNAKE_CASE__ : Optional[int] = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE__ ) <= 1: return dft[0] # SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2 while next_ncol > 0: SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol # First half of next step SCREAMING_SNAKE_CASE__ : str = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step SCREAMING_SNAKE_CASE__ : int = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2 return dft[0] def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" ) SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" ) SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 while next_ncol <= self.c_max_length: SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2) SCREAMING_SNAKE_CASE__ : Any = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c next_ncol *= 2 # Unpack SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__(self ) -> List[str]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join( F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join( F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) ) SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join( F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) ) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
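# Editor-added demo (illustrative): multiplying (1 + 2x) by (3 + 4x) with the
# class above; the two positional constructor arguments are assumed to be the
# coefficient lists, lowest degree first.
#
#   fft = lowerCAmelCase_([1, 2], [3, 4])
#   print(fft)  # expect A*B = 3 + 10x + 8x^2, up to floating-point rounding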
"""simple docstring""" import re import string import numpy as np import datasets UpperCAmelCase__ : List[Any] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' UpperCAmelCase__ : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' UpperCAmelCase__ : Any = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ (datasets.Metric ): """simple docstring""" def __magic_name__ (self ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": 
datasets.Value("""string""" , id="""sequence""" ), } ) , reference_urls=[] , ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , ) -> Optional[int]: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([re.sub(SCREAMING_SNAKE_CASE__ , """""" , SCREAMING_SNAKE_CASE__ ) for x in predictions] ) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([re.sub(SCREAMING_SNAKE_CASE__ , """""" , SCREAMING_SNAKE_CASE__ ) for x in references] ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(SCREAMING_SNAKE_CASE__ ) if ignore_case: SCREAMING_SNAKE_CASE__ : Optional[int] = np.char.lower(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.char.lower(SCREAMING_SNAKE_CASE__ ) if ignore_punctuation: SCREAMING_SNAKE_CASE__ : Optional[Any] = string.punctuation.maketrans("""""" , """""" , string.punctuation ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ ) if ignore_numbers: SCREAMING_SNAKE_CASE__ : Optional[Any] = string.digits.maketrans("""""" , """""" , string.digits ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = predictions == references return {"exact_match": np.mean(SCREAMING_SNAKE_CASE__ ) * 1_00}
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" ,type=_snake_case ,help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) ,) # rest from the training program parser.add_argument("""training_script_args""" ,nargs=_snake_case ) return parser.parse_args() def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : int = parse_args() # Import training_script as a module. SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) SCREAMING_SNAKE_CASE__ : int = script_fpath.stem SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case ) # Patch sys.argv SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores ) if __name__ == "__main__": main()
"""simple docstring""" import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def lowercase_ ( _snake_case ): return (data["data"], data["target"]) def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = XGBClassifier() classifier.fit(_snake_case ,_snake_case ) return classifier def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Dict = load_iris() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = data_handling(_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = train_test_split( _snake_case ,_snake_case ,test_size=0.25 ) SCREAMING_SNAKE_CASE__ : int = iris["""target_names"""] # Create an XGBoost Classifier from the training data SCREAMING_SNAKE_CASE__ : Optional[int] = xgboost(_snake_case ,_snake_case ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( _snake_case ,_snake_case ,_snake_case ,display_labels=_snake_case ,cmap="""Blues""" ,normalize="""true""" ,) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): return 1 if input_a == input_a else 0 def lowercase_ ( ): assert xnor_gate(0 ,0 ) == 1 assert xnor_gate(0 ,1 ) == 0 assert xnor_gate(1 ,0 ) == 0 assert xnor_gate(1 ,1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ : List[Any] = logging.get_logger(__name__) UpperCAmelCase__ : Union[str, Any] = {'vocab_file': 'vocab.txt'} UpperCAmelCase__ : Dict = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } UpperCAmelCase__ : Optional[Any] = { 'openbmb/cpm-ant-10b': 1_0_2_4, } def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : str = collections.OrderedDict() with open(_snake_case ,"""r""" ,encoding="""utf-8""" ) as reader: SCREAMING_SNAKE_CASE__ : List[Any] = reader.readlines() for index, token in enumerate(_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = token.rstrip("""\n""" ) SCREAMING_SNAKE_CASE__ : Tuple = index return vocab class lowerCAmelCase_ (a__ ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__=2_00 ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = vocab SCREAMING_SNAKE_CASE__ : Optional[Any] = unk_token SCREAMING_SNAKE_CASE__ : Tuple = max_input_chars_per_word def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = list(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > self.max_input_chars_per_word: return [self.unk_token] SCREAMING_SNAKE_CASE__ : str = 0 SCREAMING_SNAKE_CASE__ : List[Any] = [] while start < len(SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : Any = len(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = None while start < end: SCREAMING_SNAKE_CASE__ : Any = """""".join(chars[start:end] ) if substr in self.vocab: SCREAMING_SNAKE_CASE__ : List[str] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = end return sub_tokens class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Any = VOCAB_FILES_NAMES __UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Tuple = ['''input_ids''', '''attention_mask'''] __UpperCamelCase : Any = False def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<d>" , SCREAMING_SNAKE_CASE__="</d>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="</n>" , SCREAMING_SNAKE_CASE__="</_>" , SCREAMING_SNAKE_CASE__="left" , **SCREAMING_SNAKE_CASE__ , ) -> int: """simple docstring""" requires_backends(self , ["""jieba"""] ) super().__init__( bod_token=SCREAMING_SNAKE_CASE__ , eod_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , line_token=SCREAMING_SNAKE_CASE__ , space_token=SCREAMING_SNAKE_CASE__ , padding_side=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : int = bod_token SCREAMING_SNAKE_CASE__ : Any = eod_token SCREAMING_SNAKE_CASE__ : str = load_vocab(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = self.encoder[space_token] SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.encoder[line_token] del 
self.encoder[space_token] del self.encoder[line_token] SCREAMING_SNAKE_CASE__ : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda SCREAMING_SNAKE_CASE__ : x[1] ) ) SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE__ : Tuple = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def __magic_name__ (self ) -> Tuple: """simple docstring""" return self.encoder[self.bod_token] @property def __magic_name__ (self ) -> List[str]: """simple docstring""" return self.encoder[self.eod_token] @property def __magic_name__ (self ) -> Tuple: """simple docstring""" return self.encoder["\n"] @property def __magic_name__ (self ) -> int: """simple docstring""" return len(self.encoder ) def __magic_name__ (self ) -> int: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = [] for x in jieba.cut(SCREAMING_SNAKE_CASE__ , cut_all=SCREAMING_SNAKE_CASE__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) ) return output_tokens def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = [i for i in token_ids if i >= 0] SCREAMING_SNAKE_CASE__ : Optional[int] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return token in self.encoder def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" return "".join(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]: """simple docstring""" if os.path.isdir(SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : List[str] = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: SCREAMING_SNAKE_CASE__ : List[str] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory SCREAMING_SNAKE_CASE__ : int = 0 if " " in self.encoder: SCREAMING_SNAKE_CASE__ : int = self.encoder[""" """] del self.encoder[" "] if "\n" in self.encoder: SCREAMING_SNAKE_CASE__ : Dict = self.encoder["""\n"""] del self.encoder["\n"] SCREAMING_SNAKE_CASE__ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda SCREAMING_SNAKE_CASE__ : x[1] ) ) with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = token_index writer.write(token + """\n""" ) index += 1 return (vocab_file,) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]: """simple docstring""" if token_ids_a is 
None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
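# Illustrative sketch (a hypothetical standalone helper, not part of the original
# file): the WordpieceTokenizer-style class above segments a token by greedy
# longest-match-first lookup against the vocabulary, falling back to the unknown
# token one character at a time.
def greedy_longest_match(token: str, vocab: set, unk: str = "<unk>") -> list:
    sub_tokens, start = [], 0
    while start < len(token):
        end = len(token)
        cur_substr = None
        while start < end:
            if token[start:end] in vocab:
                cur_substr = token[start:end]
                break
            end -= 1
        if cur_substr is None:
            sub_tokens.append(unk)
            start += 1
        else:
            sub_tokens.append(cur_substr)
            start = end
    return sub_tokens

# greedy_longest_match("lowest", {"low", "est", "lo", "w"}) == ["low", "est"]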
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib UpperCAmelCase__ : Optional[int] = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } UpperCAmelCase__ : List[Any] = logging.WARNING def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option DATASETS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ', '.join(log_levels.keys() ) }''' ) return _default_log_level def lowercase_ ( ): return __name__.split(""".""" )[0] def lowercase_ ( ): return logging.getLogger(_get_library_name() ) def lowercase_ ( ): # Apply our default configuration to the library root logger. SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def lowercase_ ( _snake_case = None ): if name is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name() return logging.getLogger(_snake_case ) def lowercase_ ( ): return _get_library_root_logger().getEffectiveLevel() def lowercase_ ( _snake_case ): _get_library_root_logger().setLevel(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Tuple = False def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : str = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowerCAmelCase_ : """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None def __iter__(self ) -> int: """simple docstring""" return iter(self._iterator ) def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument return return empty_fn def __enter__(self ) -> Dict: """simple docstring""" return self def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" return UpperCAmelCase__ : str = True class lowerCAmelCase_ : """simple docstring""" def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) else: return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" if _tqdm_active: return 
tqdm_lib.tqdm.get_lock() UpperCAmelCase__ : Tuple = _tqdm_cls() def lowercase_ ( ): global _tqdm_active return bool(_tqdm_active ) def lowercase_ ( ): global _tqdm_active SCREAMING_SNAKE_CASE__ : Union[str, Any] = True def lowercase_ ( ): global _tqdm_active SCREAMING_SNAKE_CASE__ : str = False
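# Usage sketch (a standalone mirror of the behaviour above, not the library API):
# DATASETS_VERBOSITY maps onto standard logging levels, and set_verbosity() simply
# adjusts the level of the library root logger.
import logging
import os

_LOG_LEVELS = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

level = _LOG_LEVELS.get(os.getenv("DATASETS_VERBOSITY", "warning"), logging.WARNING)
logging.getLogger("datasets").setLevel(level)  # same effect as set_verbosity(level)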
"""simple docstring""" from __future__ import annotations class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = order # a_{0} ... a_{k} SCREAMING_SNAKE_CASE__ : Dict = [1.0] + [0.0] * order # b_{0} ... b_{k} SCREAMING_SNAKE_CASE__ : Tuple = [1.0] + [0.0] * order # x[n-1] ... x[n-k] SCREAMING_SNAKE_CASE__ : Tuple = [0.0] * self.order # y[n-1] ... y[n-k] SCREAMING_SNAKE_CASE__ : List[Any] = [0.0] * self.order def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None: """simple docstring""" if len(SCREAMING_SNAKE_CASE__ ) < self.order: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [1.0, *a_coeffs] if len(SCREAMING_SNAKE_CASE__ ) != self.order + 1: SCREAMING_SNAKE_CASE__ : List[str] = ( F'''Expected a_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE__ )}''' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) != self.order + 1: SCREAMING_SNAKE_CASE__ : int = ( F'''Expected b_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE__ )}''' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = a_coeffs SCREAMING_SNAKE_CASE__ : List[str] = b_coeffs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> float: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) SCREAMING_SNAKE_CASE__ : Tuple = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] SCREAMING_SNAKE_CASE__ : Optional[Any] = self.input_history[:-1] SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.output_history[:-1] SCREAMING_SNAKE_CASE__ : Optional[int] = sample SCREAMING_SNAKE_CASE__ : int = result return result
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : Optional[int] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : int = '''yolos''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE__ : int = num_hidden_layers SCREAMING_SNAKE_CASE__ : str = num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[str] = image_size SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = num_channels SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss # Hungarian matcher SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient SCREAMING_SNAKE_CASE__ : int = eos_coefficient class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Dict = version.parse('''1.11''' ) @property def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __magic_name__ (self ) -> float: """simple docstring""" return 1E-4 @property def __magic_name__ (self ) -> int: """simple docstring""" return 12
"""simple docstring""" import math import unittest def lowercase_ ( _snake_case ): assert isinstance(_snake_case ,_snake_case ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , ) self.assertFalse( is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = 384 SCREAMING_SNAKE_CASE__ : Tuple = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE__ : int = 96 SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2) SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96 SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE__ : Tuple = 128 SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE__ : Optional[int] = 12 SCREAMING_SNAKE_CASE__ : Optional[int] = 512 elif "large" in model_name: SCREAMING_SNAKE_CASE__ : Optional[Any] = 192 SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48) SCREAMING_SNAKE_CASE__ : List[Any] = 12 SCREAMING_SNAKE_CASE__ : Optional[Any] = 768 # set label information SCREAMING_SNAKE_CASE__ : Optional[Any] = 150 SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files""" SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : str = SwinConfig( embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,) SCREAMING_SNAKE_CASE__ : int = UperNetConfig( backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,) return config def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = val def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :] # fmt: on def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape SCREAMING_SNAKE_CASE__ : List[Any] = 
x.reshape(_snake_case ,4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : int = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[ """state_dict""" ] for name, param in state_dict.items(): print(_snake_case ,param.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case ) if "bn" in key: SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" ) SCREAMING_SNAKE_CASE__ : Dict = val # rename keys SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case ,_snake_case ,_snake_case ) read_in_q_k_v(_snake_case ,config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case ) if "norm" in key: SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case ) model.load_state_dict(_snake_case ) # verify on image SCREAMING_SNAKE_CASE__ : List[str] = 
"""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor() SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits print(logits.shape ) print("""First values of logits:""" ,logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE__ : Dict = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("""Logits:""" ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-swin-tiny', type=str, choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']], help='Name of the Swin + UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase__ : List[str] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ (a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[Any] = LEDTokenizer __UpperCamelCase : Dict = LEDTokenizerFast __UpperCamelCase : str = True def __magic_name__ (self ) -> Dict: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : int = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) SCREAMING_SNAKE_CASE__ : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] SCREAMING_SNAKE_CASE__ : List[str] = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" return "lower newer", "lower newer" @cached_property def __magic_name__ (self ) -> Tuple: """simple docstring""" return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def __magic_name__ (self ) -> Tuple: """simple docstring""" return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] SCREAMING_SNAKE_CASE__ : int = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , max_length=len(SCREAMING_SNAKE_CASE__ ) , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @require_torch def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 
["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) self.assertIn("""input_ids""" , SCREAMING_SNAKE_CASE__ ) self.assertIn("""attention_mask""" , SCREAMING_SNAKE_CASE__ ) self.assertNotIn("""labels""" , SCREAMING_SNAKE_CASE__ ) self.assertNotIn("""decoder_attention_mask""" , SCREAMING_SNAKE_CASE__ ) @require_torch def __magic_name__ (self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : str = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def __magic_name__ (self ) -> Dict: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : Tuple = tokenizer( ["""I am a small frog""" * 10_24, """I am a small frog"""] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ["""A long paragraph for summarization."""] SCREAMING_SNAKE_CASE__ : List[str] = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ : Optional[int] = inputs["""input_ids"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : Any = ["""Summary of the text.""", """Another summary."""] SCREAMING_SNAKE_CASE__ : Tuple = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] SCREAMING_SNAKE_CASE__ : int = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = [[0] * len(SCREAMING_SNAKE_CASE__ ) for x in encoded_output["""input_ids"""]] SCREAMING_SNAKE_CASE__ : str = tokenizer.pad(SCREAMING_SNAKE_CASE__ ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> str: """simple docstring""" pass def __magic_name__ (self ) -> Any: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : List[str] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = 
self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = """A, <mask> AllenNLP sentence.""" SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) SCREAMING_SNAKE_CASE__ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): if b == 0: return 1 if (b % 2) == 0: return actual_power(_snake_case ,int(b / 2 ) ) * actual_power(_snake_case ,int(b / 2 ) ) else: return a * actual_power(_snake_case ,int(b / 2 ) ) * actual_power(_snake_case ,int(b / 2 ) ) def lowercase_ ( _snake_case ,_snake_case ): if b < 0: return 1 / actual_power(_snake_case ,_snake_case ) return actual_power(_snake_case ,_snake_case ) if __name__ == "__main__": print(power(-2, -3))
"""simple docstring""" def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[int] = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0 SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2 SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3 SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5 for _ in range(1 ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case ) ugly_nums.append(_snake_case ) if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f"""{ugly_numbers(2_0_0) = }""")
"""simple docstring""" def lowercase_ ( _snake_case = 200 ): SCREAMING_SNAKE_CASE__ : List[str] = [1, 2, 5, 10, 20, 50, 100, 200] SCREAMING_SNAKE_CASE__ : Any = [0] * (pence + 1) SCREAMING_SNAKE_CASE__ : Tuple = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_snake_case ,pence + 1 ,1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(2_0_0) == 7_3_6_8_2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase__ : Dict = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ : str = num_hidden_layers SCREAMING_SNAKE_CASE__ : int = num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : int = initializer_range SCREAMING_SNAKE_CASE__ : int = layer_norm_eps SCREAMING_SNAKE_CASE__ : Dict = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride SCREAMING_SNAKE_CASE__ : Any = time_stride SCREAMING_SNAKE_CASE__ : Optional[int] = max_length SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class lowerCAmelCase_ (a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = MvpTokenizer __UpperCamelCase : Any = MvpTokenizerFast __UpperCamelCase : Optional[int] = True __UpperCamelCase : int = filter_roberta_detectors def __magic_name__ (self ) -> int: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] SCREAMING_SNAKE_CASE__ : List[str] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) SCREAMING_SNAKE_CASE__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] SCREAMING_SNAKE_CASE__ : Optional[int] = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" return "lower newer", "lower newer" @cached_property def __magic_name__ (self ) -> Any: """simple docstring""" return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" ) @cached_property def __magic_name__ (self ) -> List[str]: """simple docstring""" return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" ) @require_torch def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] SCREAMING_SNAKE_CASE__ : List[str] = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , max_length=len(SCREAMING_SNAKE_CASE__ ) , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE__ : List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Test that special tokens are reset @require_torch def 
__magic_name__ (self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) # check if input_ids are returned and no labels self.assertIn("""input_ids""" , SCREAMING_SNAKE_CASE__ ) self.assertIn("""attention_mask""" , SCREAMING_SNAKE_CASE__ ) self.assertNotIn("""labels""" , SCREAMING_SNAKE_CASE__ ) self.assertNotIn("""decoder_attention_mask""" , SCREAMING_SNAKE_CASE__ ) @require_torch def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : Dict = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def __magic_name__ (self ) -> Dict: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer( ["""I am a small frog""" * 10_24, """I am a small frog"""] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(batch.input_ids.shape , (2, 10_24) ) @require_torch def __magic_name__ (self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ["""A long paragraph for summarization."""] SCREAMING_SNAKE_CASE__ : Optional[Any] = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ : List[Any] = inputs["""input_ids"""] SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs["""labels"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def __magic_name__ (self ) -> Dict: """simple docstring""" pass def __magic_name__ (self ) -> Tuple: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """A, <mask> AllenNLP sentence.""" SCREAMING_SNAKE_CASE__ : int = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # 
attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase_ ( _snake_case ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" ) SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" ) SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" ) SCREAMING_SNAKE_CASE__ : Tuple = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE__ : Optional[Any] = value return upgrade @torch.no_grad() def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ): if config_path is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case ) else: SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig() SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval() SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case ) if os.path.exists(_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ) SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case ) hf_model.load_state_dict(_snake_case ) SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict() SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case ) SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case ) assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = 
argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring""" import requests UpperCAmelCase__ : Optional[Any] = 'YOUR API KEY' def lowercase_ ( _snake_case ,_snake_case = giphy_api_key ): SCREAMING_SNAKE_CASE__ : Tuple = """+""".join(query.split() ) SCREAMING_SNAKE_CASE__ : Optional[int] = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}''' SCREAMING_SNAKE_CASE__ : List[Any] = requests.get(_snake_case ).json()["""data"""] return [gif["url"] for gif in gifs] if __name__ == "__main__": print('\n'.join(get_gifs('space ship')))
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip' def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,) if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) ) else: SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0] print(our_output.shape ,their_output.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) UpperCAmelCase__ : Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
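For reference, a minimal sketch of how the conversion script above is typically invoked; the script filename, checkpoint directory, and output folder below are placeholder paths, not real artifacts.

# Hypothetical invocation of the converter defined above; both paths are
# placeholders. The CLI form mirrors the argparse block at the bottom:
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted
#
# The same conversion driven from Python (arguments are positional:
# checkpoint path, dump folder, classification_head flag):
convert_xlm_roberta_xl_checkpoint_to_pytorch("./xlmr.xl", "./xlm-roberta-xl-converted", False)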
25
1
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
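As a quick sanity check of the public API re-exported above, a minimal usage sketch; the dataset id is only an example.

# Minimal usage sketch of the re-exported API; "rotten_tomatoes" is just an
# example Hub dataset id, any dataset works the same way.
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train")
print(ds[0])        # first example as a dict
print(ds.features)  # column names and feature types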
0
"""simple docstring""" UpperCAmelCase__ : List[str] = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] UpperCAmelCase__ : Tuple = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] UpperCAmelCase__ : Union[str, Any] = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] UpperCAmelCase__ : str = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] UpperCAmelCase__ : str = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
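A sanity check one might run over the timestep schedules above: each list is expected to be strictly decreasing and to end at timestep 0. The shortened schedule passed below is just an illustrative example.

def check_schedule(schedule):
    # every step must be strictly smaller than the previous one
    assert all(a > b for a, b in zip(schedule, schedule[1:])), "not strictly decreasing"
    # every schedule terminates at the final denoising step
    assert schedule[-1] == 0, "schedule must end at timestep 0"

check_schedule([999, 800, 799, 600, 400, 200, 0])  # shortened example schedule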
25
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: Dict ={ 'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json', } class __A ( UpperCamelCase__ ): a__ : Union[str, Any] = """nllb-moe""" a__ : Optional[Any] = ["""past_key_values"""] a__ : Dict = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self : Optional[Any] , __a : Tuple=128112 , __a : Optional[int]=1024 , __a : List[str]=12 , __a : int=4096 , __a : str=16 , __a : Any=12 , __a : List[str]=4096 , __a : str=16 , __a : Any=0.05 , __a : str=0.05 , __a : Dict=True , __a : Dict=True , __a : Optional[Any]="relu" , __a : List[str]=1024 , __a : Any=0.1 , __a : Union[str, Any]=0.1 , __a : List[str]=0.0 , __a : int=0.02 , __a : Optional[int]=2 , __a : List[Any]=True , __a : Any=False , __a : List[str]="float32" , __a : List[str]=False , __a : Optional[int]=128 , __a : Any=64 , __a : Any=4 , __a : List[str]=4 , __a : Union[str, Any]=0.0_01 , __a : Optional[Any]=0.0_01 , __a : List[Any]="all" , __a : Tuple=False , __a : Any=False , __a : Dict=1.0 , __a : int=0.2 , __a : Dict=1 , __a : Dict=0 , __a : Optional[int]=2 , __a : Any=False , **__a : Optional[int] , ): UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = d_model UpperCAmelCase_ = encoder_ffn_dim UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = encoder_attention_heads UpperCAmelCase_ = decoder_ffn_dim UpperCAmelCase_ = decoder_layers UpperCAmelCase_ = decoder_attention_heads UpperCAmelCase_ = dropout UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = activation_dropout UpperCAmelCase_ = activation_function UpperCAmelCase_ = init_std UpperCAmelCase_ = encoder_layerdrop UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = use_cache UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase_ = router_z_loss_coef UpperCAmelCase_ = router_aux_loss_coef UpperCAmelCase_ = decoder_sparse_step UpperCAmelCase_ = encoder_sparse_step UpperCAmelCase_ = num_experts UpperCAmelCase_ = expert_capacity UpperCAmelCase_ = router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) UpperCAmelCase_ = router_dtype UpperCAmelCase_ = router_ignore_padding_tokens UpperCAmelCase_ = batch_prioritized_routing UpperCAmelCase_ = second_expert_policy UpperCAmelCase_ = normalize_router_prob_before_dropping UpperCAmelCase_ = moe_eval_capacity_token_fraction UpperCAmelCase_ = moe_token_dropout UpperCAmelCase_ = output_router_logits super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , **__a , )
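A usage sketch for the configuration class above, assuming it is exported as `NllbMoeConfig` as in the transformers library; the overrides are arbitrary example values chosen for a small toy model.

# Assumes the class above is exported as `NllbMoeConfig` (as in `transformers`);
# the overridden values are arbitrary.
from transformers import NllbMoeConfig

config = NllbMoeConfig(
    d_model=512,
    encoder_layers=2,
    decoder_layers=2,
    num_experts=4,
)
print(config.router_dtype)  # "float32" by default, per the validation above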
1
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase__ : List[str] = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCAmelCase__ : List[Any] = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = SavedModel() SCREAMING_SNAKE_CASE__ : Dict = [] with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f: SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(_snake_case )] ) with open(_snake_case ,"""rb""" ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE__ : List[str] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(_snake_case ) if strict and len(_snake_case ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(_snake_case ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*_snake_case ,sep="""\n""" ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) UpperCAmelCase__ : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
25
0
'''simple docstring'''
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year, following Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
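Two spot checks against known Gregorian Easter dates, runnable as an appendix to the module above.

# Known Easter Sundays: April 4, 2021 and April 9, 2023.
assert gauss_easter(2021) == datetime(2021, 4, 4)
assert gauss_easter(2023) == datetime(2023, 4, 9)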
2
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) UpperCAmelCase__ : List[Any] = logging.getLogger() def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case ) Path(_snake_case ).open("""w""" ).writelines(_snake_case ) UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random' UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart' UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowerCAmelCase_ (a__ ): """simple docstring""" def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""] _dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : Optional[Any] = F''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): run_generate() assert Path(SCREAMING_SNAKE_CASE__ ).exists() # os.remove(Path(output_file_name)) def __magic_name__ (self ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : Any = { """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""], """de""": [ """Maschinelles Lernen ist großartig, oder?""", """Ich esse gerne Bananen""", """Morgen ist wieder ein toller Tag!""", ], } SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] ) SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : List[Any] = F''' run_eval_search.py {model} 
{str(SCREAMING_SNAKE_CASE__ )} {str(SCREAMING_SNAKE_CASE__ )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] ) with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): with CaptureStdout() as cs: run_search() SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""] SCREAMING_SNAKE_CASE__ : Any = ["""Info"""] if "translation" in task: expected_strings.append("""bleu""" ) else: expected_strings.extend(SCREAMING_SNAKE_CASE__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(SCREAMING_SNAKE_CASE__ ).exists() os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
25
0
'''simple docstring'''


def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums using recursive rotation."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums using backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
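The two implementations above should agree; a small cross-check, runnable as an appendix to the module:

# Both functions should produce the same 3! = 6 permutations, possibly in a
# different order, so compare them as sets of tuples.
a = {tuple(p) for p in permute([1, 2, 3])}
b = {tuple(p) for p in permute2([1, 2, 3])}
assert a == b and len(a) == 6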
3
"""simple docstring""" UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}] UpperCAmelCase__ : Optional[int] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
25
0
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __snake_case =logging.get_logger(__name__) __snake_case ={ """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __snake_case ={ """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } __snake_case ={"""facebook/blenderbot-3B""": 128} class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : List[Any] = VOCAB_FILES_NAMES lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask'''] lowerCamelCase : List[Any] = BlenderbotTokenizer def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int: super().__init__( UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space: lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) ) lowerCAmelCase = add_prefix_space lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ ) lowerCAmelCase = add_prefix_space lowerCAmelCase = 'post_processor' lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ ) if tokenizer_component_instance: lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCAmelCase = tuple(state['sep'] ) if "cls" in state: lowerCAmelCase = tuple(state['cls'] ) lowerCAmelCase = False if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space: lowerCAmelCase = add_prefix_space lowerCAmelCase = True if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets: lowerCAmelCase = trim_offsets lowerCAmelCase = True if changes_to_apply: lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) ) 
lowerCAmelCase = component_class(**UpperCAmelCase__ ) setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __UpperCAmelCase ( self : Union[str, Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple: lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value lowerCAmelCase = value def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding: lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding: lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ ) def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ ) def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any: return token_ids_a + [self.eos_token_id] def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]: lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(UpperCAmelCase__ ) lowerCAmelCase = ' '.join(UpperCAmelCase__ ) lowerCAmelCase = self.encode(UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > self.model_max_length: lowerCAmelCase = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
4
"""simple docstring""" def lowercase_ ( _snake_case ): if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_snake_case ,_snake_case ): raise TypeError("""Input value must be a 'int' type""" ) return bin(_snake_case ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
25
0
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of a string along a zigzag through a grid whose
    height depends on the key, then read the grid row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generate a zigzag template from the key, fill it with the characters of
    the input string, then read the characters back in zigzag order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Use the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
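A round-trip check for the cipher above:

# decrypt should invert encrypt for any key smaller than the message length.
message = "Hello, World!"
for key in range(2, 6):
    assert decrypt(encrypt(message, key), key) == message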
5
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCAmelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a__ ) class lowerCAmelCase_ (a__ ): """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) requires_backends(self , """vision""" ) self.check_model_type(SCREAMING_SNAKE_CASE__ ) def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" return {}, {}, {} def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = image.size SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework ) return model_inputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ ) return model_outputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy() SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" ) SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = {} SCREAMING_SNAKE_CASE__ : Any = predicted_depth SCREAMING_SNAKE_CASE__ : Dict = depth return output_dict
25
0
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at (row, column) without conflicts."""
    # same row and same column
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left and upper-right diagonals
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
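The classic 8x8 instance has exactly 92 solutions, which gives a simple end-of-run check:

# After the module-level demo above has run, `solution` holds one entry per
# solution found; for n = 8 there are exactly 92.
assert len(solution) == 92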
6
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = IFPipeline __UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" return self._get_dummy_components() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]: """simple docstring""" if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def __magic_name__ (self ) -> List[str]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_local() def __magic_name__ (self ) -> Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ (self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : int = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) 
).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowercase_ ( ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
25
0
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 lowercase_ = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class A ( unittest.TestCase ): """simple docstring""" @classmethod def snake_case__ ( cls : Optional[Any] )-> str: '''simple docstring''' A__ = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def snake_case__ ( cls : Optional[int] )-> Union[str, Any]: '''simple docstring''' try: delete_repo(token=cls._token,repo_id='test-config' ) except HTTPError: pass try: delete_repo(token=cls._token,repo_id='valid_org/test-config-org' ) except HTTPError: pass try: delete_repo(token=cls._token,repo_id='test-dynamic-config' ) except HTTPError: pass def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = BertConfig( vocab_size=9_9,hidden_size=3_2,num_hidden_layers=5,num_attention_heads=4,intermediate_size=3_7 ) config.push_to_hub('test-config',use_auth_token=self._token ) A__ = BertConfig.from_pretrained(F'{USER}/test-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_,getattr(lowercase_,lowercase_ ) ) # Reset repo delete_repo(token=self._token,repo_id='test-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_,repo_id='test-config',push_to_hub=lowercase_,use_auth_token=self._token ) A__ = BertConfig.from_pretrained(F'{USER}/test-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_,getattr(lowercase_,lowercase_ ) ) def snake_case__ ( self : str )-> Dict: '''simple docstring''' A__ = BertConfig( vocab_size=9_9,hidden_size=3_2,num_hidden_layers=5,num_attention_heads=4,intermediate_size=3_7 ) 
config.push_to_hub('valid_org/test-config-org',use_auth_token=self._token ) A__ = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_,getattr(lowercase_,lowercase_ ) ) # Reset repo delete_repo(token=self._token,repo_id='valid_org/test-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_,repo_id='valid_org/test-config-org',push_to_hub=lowercase_,use_auth_token=self._token ) A__ = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_,getattr(lowercase_,lowercase_ ) ) def snake_case__ ( self : str )-> Union[str, Any]: '''simple docstring''' CustomConfig.register_for_auto_class() A__ = CustomConfig(attribute=4_2 ) config.push_to_hub('test-dynamic-config',use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map,{'AutoConfig': 'custom_configuration.CustomConfig'} ) A__ = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config',trust_remote_code=lowercase_ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__,'CustomConfig' ) self.assertEqual(new_config.attribute,4_2 ) class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Tuple )-> Union[str, Any]: '''simple docstring''' A__ = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated A__ = c.n_embd + 1 # int A__ = c.resid_pdrop + 1.0 # float A__ = not c.scale_attn_weights # bool A__ = c.summary_type + 'foo' # str c.update_from_string( F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' ) self.assertEqual(lowercase_,c.n_embd,'mismatch for key: n_embd' ) self.assertEqual(lowercase_,c.resid_pdrop,'mismatch for key: resid_pdrop' ) self.assertEqual(lowercase_,c.scale_attn_weights,'mismatch for key: scale_attn_weights' ) self.assertEqual(lowercase_,c.summary_type,'mismatch for key: summary_type' ) def snake_case__ ( self : Any )-> Optional[int]: '''simple docstring''' A__ = PretrainedConfig() A__ = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( lowercase_,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] ) A__ = [key for key, value in config_common_kwargs.items() if value == getattr(lowercase_,lowercase_ )] if len(lowercase_ ) > 0: raise ValueError( 'The following keys are set with the default values in' ' `test_configuration_common.config_common_kwargs` pick another value for them:' F' {", ".join(lowercase_ )}.' 
) def snake_case__ ( self : Optional[Any] )-> Optional[int]: '''simple docstring''' with self.assertRaises(lowercase_ ): # config is in subfolder, the following should not work without specifying the subfolder A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ) A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder',subfolder='bert' ) self.assertIsNotNone(lowercase_ ) def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' A__ = mock.Mock() A__ = 5_0_0 A__ = {} A__ = HTTPError A__ = {} # Download this model to make sure it's in the cache. A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request',return_value=lowercase_ ) as mock_head: A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() def snake_case__ ( self : int )-> Tuple: '''simple docstring''' A__ = BertConfig.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' ) def snake_case__ ( self : str )-> Any: '''simple docstring''' A__ = AutoConfig.from_pretrained('bert-base-cased' ) A__ = ['config.4.0.0.json'] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowercase_ ) A__ = 2 json.dump(configuration.to_dict(),open(os.path.join(lowercase_,'config.4.0.0.json' ),'w' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 A__ = AutoConfig.from_pretrained(lowercase_ ) self.assertEqual(new_configuration.hidden_size,2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 A__ = ['config.42.0.0.json'] A__ = 7_6_8 configuration.save_pretrained(lowercase_ ) shutil.move(os.path.join(lowercase_,'config.4.0.0.json' ),os.path.join(lowercase_,'config.42.0.0.json' ) ) A__ = AutoConfig.from_pretrained(lowercase_ ) self.assertEqual(new_configuration.hidden_size,7_6_8 ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' A__ = 'hf-internal-testing/test-two-configs' import transformers as new_transformers A__ = 'v4.0.0' A__ , A__ = new_transformers.models.auto.AutoConfig.from_pretrained( lowercase_,return_unused_kwargs=lowercase_ ) self.assertEqual(new_configuration.hidden_size,2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowercase_,{} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers A__ = 'v3.0.0' A__ = old_transformers.models.auto.AutoConfig.from_pretrained(lowercase_ ) self.assertEqual(old_configuration.hidden_size,7_6_8 )
7
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) SCREAMING_SNAKE_CASE__ : int = Accelerator() SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ ) try: pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
25
0
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/0140328726'-style olid, return the book data from Open Library."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a more human-readable summary."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
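A non-interactive usage sketch for the module above (performs live HTTP requests against openlibrary.org, so it needs network access):

# ISBN 0140328726 is the default example above (Roald Dahl's "Matilda").
book = summarize_book(get_openlibrary_data("isbn/0140328726"))
print(book["Title"])  # expected: Matilda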
8
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) SCREAMING_SNAKE_CASE__ : int = [] # custom device map if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_snake_case ) # compatibility with peft SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) # convert param to the right dtype SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" ) SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_snake_case ): param.to(_snake_case ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers( _snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map( _snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE__ : Tuple = True SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,) return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ): if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_snake_case ,_snake_case ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE__ : int = get_balanced_memory( _snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,) SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case ) if isinstance(_snake_case ,_snake_case ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE__ : Optional[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ): if modules_to_not_convert is None: SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,): SCREAMING_SNAKE_CASE__ : Tuple = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE__ : Any = [] current_key_name.append(_snake_case ) if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE__ : List[str] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt( module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit( module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) SCREAMING_SNAKE_CASE__ : str = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data bnb_module.requires_grad_(_snake_case ) setattr(_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowercase_ ( _snake_case ): # Create a copy of the model with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case ) # For compatibility with Accelerate < 0.18 if isinstance(_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] ) SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE__ : Optional[int] = False if hasattr(_snake_case 
,"""base_model_prefix""" ): SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" ) filtered_module_names.append(_snake_case ) return filtered_module_names def lowercase_ ( _snake_case ): for m in model.modules(): if isinstance(_snake_case ,bnb.nn.Linearabit ): return True return False def lowercase_ ( _snake_case ): return next(parameter.parameters() ).device def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case ) SCREAMING_SNAKE_CASE__ : str = param_name SCREAMING_SNAKE_CASE__ : Dict = model if "." in tensor_name: SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" ) for split in splits[:-1]: SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1] # offload weights SCREAMING_SNAKE_CASE__ : List[Any] = False offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case ) if hasattr(module._parameters[tensor_name] ,"""SCB""" ): offload_weight( module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,) else: offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case ) offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ) set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
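A minimal usage sketch for the quantization helper above, assuming the public entry points keep accelerate's names (`BnbQuantizationConfig`, `load_and_quantize_model`); the checkpoint id is illustrative and a CUDA device is required at runtime:

from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoModelForCausalLM

# Assumed API, mirroring accelerate.utils.bnb; verify against your installed version.
bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")  # illustrative checkpoint
model = load_and_quantize_model(model, bnb_quantization_config=bnb_config)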
25
0
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
9
"""simple docstring"""


def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
0
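A quick self-contained check of the longest_common_substring helper defined above (input strings are illustrative):

# "ab" is the longest run shared by both strings; "de" ties at length 2 but is found later,
# and the strictly-greater comparison keeps the first maximum.
assert longest_common_substring("abcdef", "xabded") == "ab"
print(longest_common_substring("GeeksforGeeks", "GeeksQuiz"))  # -> Geeks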
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
10
"""simple docstring""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ): SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """""" # apply OCR SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""] # filter empty words and corresponding coordinates SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()] SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format SCREAMING_SNAKE_CASE__ : List[Any] = [] for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h] actual_boxes.append(_snake_case ) # finally, normalize the bounding boxes SCREAMING_SNAKE_CASE__ : List[str] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) ) assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = ['''pixel_values'''] def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24} SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize 
SCREAMING_SNAKE_CASE__ : Any = size SCREAMING_SNAKE_CASE__ : List[Any] = resample SCREAMING_SNAKE_CASE__ : Dict = apply_ocr SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""]) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if apply_ocr: requires_backends(self , """pytesseract""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] SCREAMING_SNAKE_CASE__ : Dict = [] for image in images: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) words_batch.append(SCREAMING_SNAKE_CASE__ ) boxes_batch.append(SCREAMING_SNAKE_CASE__ ) if do_resize: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ ) if apply_ocr: SCREAMING_SNAKE_CASE__ : List[Any] = words_batch SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch return data
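The class above mirrors transformers' LayoutLMv2ImageProcessor; a hedged usage sketch against that public class (the input file is illustrative, and OCR requires pytesseract to be installed):

from PIL import Image
from transformers import LayoutLMv2ImageProcessor  # public counterpart of the class above

processor = LayoutLMv2ImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")  # illustrative input
encoding = processor(image, return_tensors="np")
print(encoding.pixel_values.shape)     # (1, 3, 224, 224) after the default resize
print(encoding.words, encoding.boxes)  # OCR words and normalized bounding boxes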
25
0
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
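In practice this wrapper is constructed by `Accelerator.prepare` rather than by hand; a minimal sketch of the intended flow:

import torch
from accelerate import Accelerator

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
accelerator = Accelerator()
# prepare() hands back the scheduler wrapped in the class above
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
optimizer.step()
lr_scheduler.step()  # only advances when the optimizer actually stepped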
11
"""simple docstring"""
import mpmath  # for roots of unity
import numpy as np


class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as lists of coefficients
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and recover A*B via the inverse DFT
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
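A small worked example for the class above: multiplying A(x) = 1 + 2x + 3x^2 by B(x) = 1 + x.

fft = FFT([1, 2, 3], [1, 1])
print(fft)          # shows A, B and A*B
print(fft.product)  # coefficients of A*B, i.e. 1 + 3x + 5x^2 + 3x^3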
25
0
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
12
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
25
0
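For reference, the launcher above is invoked from the command line; the script name and flags below are illustrative:

# python xla_spawn.py --num_cores 8 my_training_script.py --batch_size 32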
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
13
"""simple docstring"""


def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
25
0
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
14
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib UpperCAmelCase__ : Optional[int] = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } UpperCAmelCase__ : List[Any] = logging.WARNING def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option DATASETS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ', '.join(log_levels.keys() ) }''' ) return _default_log_level def lowercase_ ( ): return __name__.split(""".""" )[0] def lowercase_ ( ): return logging.getLogger(_get_library_name() ) def lowercase_ ( ): # Apply our default configuration to the library root logger. SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def lowercase_ ( _snake_case = None ): if name is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name() return logging.getLogger(_snake_case ) def lowercase_ ( ): return _get_library_root_logger().getEffectiveLevel() def lowercase_ ( _snake_case ): _get_library_root_logger().setLevel(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Tuple = False def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : str = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowerCAmelCase_ : """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None def __iter__(self ) -> int: """simple docstring""" return iter(self._iterator ) def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument return return empty_fn def __enter__(self ) -> Dict: """simple docstring""" return self def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" return UpperCAmelCase__ : str = True class lowerCAmelCase_ : """simple docstring""" def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) else: return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" if _tqdm_active: return 
tqdm_lib.tqdm.get_lock() UpperCAmelCase__ : Tuple = _tqdm_cls() def lowercase_ ( ): global _tqdm_active return bool(_tqdm_active ) def lowercase_ ( ): global _tqdm_active SCREAMING_SNAKE_CASE__ : Union[str, Any] = True def lowercase_ ( ): global _tqdm_active SCREAMING_SNAKE_CASE__ : str = False
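A short sketch of how these helpers are used from client code, assuming the module above ships as `datasets.utils.logging`:

from datasets.utils import logging  # assumed public location of the module above

logging.set_verbosity(logging.INFO)
logger = logging.get_logger(__name__)
logger.info("now visible at INFO verbosity")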
25
0
from typing import Dict, Optional import numpy as np import datasets SCREAMING_SNAKE_CASE :List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' SCREAMING_SNAKE_CASE :List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' SCREAMING_SNAKE_CASE :str = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Tuple: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): __A = new_id # turn into Numpy arrays __A = np.array(a_ ) __A = np.array(a_ ) if reduce_labels: __A = 2_5_5 __A = label - 1 __A = 2_5_5 __A = label != ignore_index __A = np.not_equal(a_ , a_ ) __A = pred_label[mask] __A = np.array(a_ )[mask] __A = pred_label[pred_label == label] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Union[str, Any]: """simple docstring""" __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(a_ , a_ ): __A , __A , __A , __A = intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = None , a_ = False , ) -> str: """simple docstring""" __A , __A , __A , __A = total_intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) # compute metrics __A = {} __A = total_area_intersect.sum() / total_area_label.sum() __A = total_area_intersect / total_area_union __A = total_area_intersect / total_area_label __A = np.nanmean(a_ ) __A = np.nanmean(a_ ) __A = all_acc __A = iou __A = acc if nan_to_num is not None: __A = {metric: np.nan_to_num(a_ , nan=a_ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) ,reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] ,) def UpperCamelCase_ ( self : int ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : bool ,A : Optional[int] = None ,A : Optional[Dict[int, int]] = None ,A : bool = False ,): __A = mean_iou( results=A ,gt_seg_maps=A ,num_labels=A ,ignore_index=A ,nan_to_num=A ,label_map=A ,reduce_labels=A ,) return iou_result
15
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : Optional[int] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : int = '''yolos''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE__ : int = num_hidden_layers SCREAMING_SNAKE_CASE__ : str = num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[str] = image_size SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = num_channels SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss # Hungarian matcher SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient SCREAMING_SNAKE_CASE__ : int = eos_coefficient class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Dict = version.parse('''1.11''' ) @property def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __magic_name__ (self ) -> float: """simple docstring""" return 1E-4 @property def __magic_name__ (self ) -> int: """simple docstring""" return 12
25
0
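A hedged sketch instantiating the YOLOS configuration above through its public counterpart in transformers (parameter values are illustrative):

from transformers import YolosConfig, YolosForObjectDetection

config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
model = YolosForObjectDetection(config)  # randomly initialized, for illustration only
print(config.hidden_size, config.num_detection_tokens)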
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: lowerCAmelCase_ = None lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase_ = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', }, 'tokenizer_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json', }, } lowerCAmelCase_ = { 'albert-base-v1': 512, 'albert-large-v1': 512, 'albert-xlarge-v1': 512, 'albert-xxlarge-v1': 512, 'albert-base-v2': 512, 'albert-large-v2': 512, 'albert-xlarge-v2': 512, 'albert-xxlarge-v2': 512, } lowerCAmelCase_ = '▁' class __A ( A_ ): '''simple docstring''' lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : Optional[int] = AlbertTokenizer def __init__( self : List[Any] ,_snake_case : Tuple=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[Any]=True ,_snake_case : int=True ,_snake_case : Dict=False ,_snake_case : Optional[int]="[CLS]" ,_snake_case : List[Any]="[SEP]" ,_snake_case : Tuple="<unk>" ,_snake_case : int="[SEP]" ,_snake_case : List[Any]="<pad>" ,_snake_case : Optional[int]="[CLS]" ,_snake_case : int="[MASK]" ,**_snake_case : Optional[Any] ,) -> List[str]: """simple docstring""" lowercase__ : List[str] = ( AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ,normalized=_snake_case ) if isinstance(_snake_case ,_snake_case ) else mask_token ) super().__init__( _snake_case ,tokenizer_file=_snake_case ,do_lower_case=_snake_case ,remove_space=_snake_case ,keep_accents=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,**_snake_case ,) lowercase__ : Optional[int] = do_lower_case lowercase__ : str = remove_space lowercase__ : Dict = 
keep_accents lowercase__ : List[str] = vocab_file lowercase__ : str = False if not self.vocab_file else True def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : Union[str, Any] = [self.sep_token_id] lowercase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase ( self : Dict ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : List[str] = [self.sep_token_id] lowercase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase ( self : List[str] ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : List[Any] = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file ,_snake_case ) return (out_vocab_file,)
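Typical usage of the fast tokenizer above via its public counterpart (the checkpoint id is illustrative; exact ids depend on the vocabulary):

from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoding = tokenizer("Hello world")
print(encoding.input_ids)  # [CLS] ... [SEP] ids, matching the special-token layout built above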
16
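# Usage sketch for the fast tokenizer defined above; in the published library this class
# is exposed as transformers.AlbertTokenizerFast, and "albert-base-v2" is one of the
# checkpoints listed in the vocab map above. Everything else here is an illustrative
# assumption (and the from_pretrained call needs a network download):
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
enc = tokenizer("hello world")
# Single sequences come back as [CLS] X [SEP]; pairs as [CLS] A [SEP] B [SEP], matching
# the two special-token helpers above; token_type_ids are 0 for segment A, 1 for B.
assert enc["input_ids"][0] == tokenizer.cls_token_id
assert enc["input_ids"][-1] == tokenizer.sep_token_id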
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = 384 SCREAMING_SNAKE_CASE__ : Tuple = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE__ : int = 96 SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2) SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96 SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE__ : Tuple = 128 SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE__ : Optional[int] = 12 SCREAMING_SNAKE_CASE__ : Optional[int] = 512 elif "large" in model_name: SCREAMING_SNAKE_CASE__ : Optional[Any] = 192 SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48) SCREAMING_SNAKE_CASE__ : List[Any] = 12 SCREAMING_SNAKE_CASE__ : Optional[Any] = 768 # set label information SCREAMING_SNAKE_CASE__ : Optional[Any] = 150 SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files""" SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : str = SwinConfig( embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,) SCREAMING_SNAKE_CASE__ : int = UperNetConfig( backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,) return config def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = val def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :] # fmt: on def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape SCREAMING_SNAKE_CASE__ : List[Any] = 
x.reshape(_snake_case ,4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : int = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[ """state_dict""" ] for name, param in state_dict.items(): print(_snake_case ,param.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case ) if "bn" in key: SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" ) SCREAMING_SNAKE_CASE__ : Dict = val # rename keys SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case ,_snake_case ,_snake_case ) read_in_q_k_v(_snake_case ,config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case ) if "norm" in key: SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case ) model.load_state_dict(_snake_case ) # verify on image SCREAMING_SNAKE_CASE__ : List[str] = 
"""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor() SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits print(logits.shape ) print("""First values of logits:""" ,logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE__ : Dict = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("""Logits:""" ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-swin-tiny', type=str, choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']], help='Name of the Swin + UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase__ : List[str] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
25
0
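# The downsample-weight helpers in the conversion script above permute the 4 patch
# slots produced by Swin's patch-merging unfold. A standalone round-trip check with a
# toy weight shape (names and sizes here are illustrative, not from the source):
import torch


def correct_unfold_reduction_order(x: torch.Tensor) -> torch.Tensor:
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    return x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)


def reverse_correct_unfold_reduction_order(x: torch.Tensor) -> torch.Tensor:
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    return x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)


w = torch.randn(8, 16)
# The permutation [0, 2, 1, 3] is its own inverse, so applying the forward and reverse
# orderings recovers the original reduction weight exactly.
assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(w)), w)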
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray] __UpperCAmelCase : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('>=', '0.0.12') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline 
try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : np.ndarray __UpperCAmelCase : List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
17
"""simple docstring""" import math import unittest def lowercase_ ( _snake_case ): assert isinstance(_snake_case ,_snake_case ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , ) self.assertFalse( is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
25
0
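# The 6k +/- 1 loop in is_prime above works because every prime p > 3 satisfies
# p % 6 in {1, 5}. A brute-force cross-check over a small range (these helpers are
# hypothetical, not part of the original file):
import math


def is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, math.isqrt(n) + 1))


def is_prime_6k(n: int) -> bool:
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    return all(n % i and n % (i + 2) for i in range(5, int(math.sqrt(n) + 1), 6))


assert all(is_prime_6k(n) == is_prime_naive(n) for n in range(2, 1_000))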
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class a__ ( unittest.TestCase ): def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } SCREAMING_SNAKE_CASE_ : Any = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(_A ),_A ) def __UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 ) self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) ) SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 ) self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) ) @require_torch def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A ) self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) ) SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 ) SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A ) self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) ) @require_tf def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A ) self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) ) SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A ) self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) ) @require_flax def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A ) self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 ) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A ) self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) ) def __UpperCamelCase ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 ) self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) ) SCREAMING_SNAKE_CASE_ : Any 
= np.random.randn(3,4,5 ) self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) ) @require_torch def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A ) self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) ) SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 ) SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A ) self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) ) @require_tf def __UpperCamelCase ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A ) self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) ) SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 ) SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A ) self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) ) @require_flax def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : int = jnp.array(_A ) self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 ) SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A ) self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) ) def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 ) self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) ) SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 ) self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) ) @require_torch def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 ) SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A ) self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) ) SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 ) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A ) self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) ) @require_tf def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A ) self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) ) SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 ) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A ) self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) ) @require_flax def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 ) SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A ) self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) ) SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A ) self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) ) def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 ) self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) ) @require_torch def __UpperCamelCase ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A ) 
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) ) @require_tf def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A ) self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) ) @require_flax def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A ) self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
18
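# A minimal re-implementation of the flatten_dict behaviour exercised in the tests
# above; the library version also accepts a custom delimiter and MutableMapping inputs,
# while this sketch only covers plain nested dicts:
def flatten_dict_sketch(d: dict, parent_key: str = "", delimiter: str = ".") -> dict:
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else str(k)
        if isinstance(v, dict):
            items.update(flatten_dict_sketch(v, key, delimiter))
        else:
            items[key] = v
    return items


assert flatten_dict_sketch({"a": {"b": 1, "c": {"d": 2}}}) == {"a.b": 1, "a.c.d": 2}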
"""simple docstring""" def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[int] = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0 SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2 SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3 SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5 for _ in range(1 ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case ) ugly_nums.append(_snake_case ) if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f"""{ugly_numbers(2_0_0) = }""")
25
0
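# Cross-check for the three-pointer merge above: a heap-based generator must agree on
# the n-th ugly number (this helper is hypothetical, not part of the original file):
import heapq


def ugly_numbers_heap(n: int) -> int:
    seen, heap = {1}, [1]
    for _ in range(n - 1):
        value = heapq.heappop(heap)
        for factor in (2, 3, 5):
            if value * factor not in seen:
                seen.add(value * factor)
                heapq.heappush(heap, value * factor)
    return heapq.heappop(heap)


# The first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.
assert ugly_numbers_heap(10) == 12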
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
19
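# Quick use of explicit_euler above on y' = y, y(0) = 1; forward Euler is first-order
# accurate, so y(1) approaches e ~ 2.71828 as the step size shrinks:
approx = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)[-1]
assert abs(approx - 2.71828) < 0.01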
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase__ : Dict = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ : str = num_hidden_layers SCREAMING_SNAKE_CASE__ : int = num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : int = initializer_range SCREAMING_SNAKE_CASE__ : int = layer_norm_eps SCREAMING_SNAKE_CASE__ : Dict = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride SCREAMING_SNAKE_CASE__ : Any = time_stride SCREAMING_SNAKE_CASE__ : Optional[int] = max_length SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
25
0
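# The config above ships in transformers as ASTConfig (see the archive map); a minimal
# instantiation sketch using the defaults shown, overriding only the spectrogram
# geometry (assumes a transformers install that already includes the AST model):
from transformers import ASTConfig

config = ASTConfig(max_length=1024, num_mel_bins=128)
assert config.model_type == "audio-spectrogram-transformer"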
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) lowercase : Dict = """bert-base-cased""" lowercase : int = """fp16""" lowercase : Optional[int] = """bf16""" lowercase : str = [FPaa, BFaa] @require_fsdp @require_cuda class __snake_case ( lowerCAmelCase ): def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() lowercase : str = dict( ACCELERATE_USE_FSDP="""true""" ,MASTER_ADDR="""localhost""" ,MASTER_PORT="""10999""" ,RANK="""0""" ,LOCAL_RANK="""0""" ,WORLD_SIZE="""1""" ,) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(snake_case ): lowercase : Tuple = self.dist_env.copy() lowercase : Dict = f"{i + 1}" lowercase : Optional[int] = strategy with mockenv_context(**snake_case ): lowercase : Any = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(snake_case ): lowercase : Dict = self.dist_env.copy() lowercase : List[str] = prefetch_policy with mockenv_context(**snake_case ): lowercase : Tuple = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(snake_case ): lowercase : int = self.dist_env.copy() lowercase : List[str] = state_dict_type with mockenv_context(**snake_case ): lowercase : Optional[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = AutoModel.from_pretrained(snake_case ) for policy in FSDP_AUTO_WRAP_POLICY: lowercase : str = self.dist_env.copy() lowercase : Optional[Any] = policy if policy == "TRANSFORMER_BASED_WRAP": lowercase : List[Any] = """BertLayer""" elif policy == "SIZE_BASED_WRAP": lowercase : List[str] = """2000""" with mockenv_context(**snake_case ): lowercase : Optional[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) lowercase : Union[str, Any] = self.dist_env.copy() lowercase : Optional[int] = """TRANSFORMER_BASED_WRAP""" lowercase : Optional[int] = 
"""T5Layer""" with mockenv_context(**snake_case ): lowercase : str = FullyShardedDataParallelPlugin() with self.assertRaises(snake_case ) as cm: fsdp_plugin.set_auto_wrap_policy(snake_case ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) lowercase : List[str] = self.dist_env.copy() lowercase : Tuple = """SIZE_BASED_WRAP""" lowercase : List[Any] = """0""" with mockenv_context(**snake_case ): lowercase : Any = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: lowercase : str = self.dist_env.copy() lowercase : List[Any] = mp_dtype with mockenv_context(**snake_case ): lowercase : Dict = Accelerator() if mp_dtype == "fp16": lowercase : str = torch.floataa elif mp_dtype == "bf16": lowercase : Optional[int] = torch.bfloataa lowercase : Optional[Any] = MixedPrecision(param_dtype=snake_case ,reduce_dtype=snake_case ,buffer_dtype=snake_case ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,snake_case ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,snake_case ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: lowercase : Dict = self.dist_env.copy() lowercase : Optional[int] = str(snake_case ).lower() with mockenv_context(**snake_case ): lowercase : Optional[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=snake_case ) ) @require_fsdp @require_multi_gpu @slow class __snake_case ( lowerCAmelCase ): def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() lowercase : Optional[int] = 0.82 lowercase : List[str] = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] lowercase : int = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } lowercase : Dict = 160 lowercase : Optional[Any] = 160 lowercase : int = inspect.getfile(accelerate.test_utils ) lowercase : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Dict = os.path.join(self.test_scripts_folder ,"""test_performance.py""" ) lowercase : int = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: lowercase : Optional[int] = cmd.copy() for i, strategy in enumerate(snake_case ): if strategy.lower() in config: cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--performance_lower_bound={self.performance_lower_bound}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case ,env=os.environ.copy() ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = os.path.join(self.test_scripts_folder ,"""test_checkpointing.py""" ) lowercase : int = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(snake_case ): lowercase : Any = cmd.copy() cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) if strategy != "FULL_SHARD": continue lowercase : Union[str, Any] = len(snake_case ) for state_dict_type in FSDP_STATE_DICT_TYPE: lowercase : Optional[int] = cmd_config[:state_dict_config_index] cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case ,env=os.environ.copy() ) lowercase : Tuple = cmd_config[:-1] lowercase : Tuple = os.path.join(self.tmpdir ,"""epoch_0""" ) cmd_config.extend( [ f"--resume_from_checkpoint={resume_from_checkpoint}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case ,env=os.environ.copy() ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = os.path.join(self.test_scripts_folder ,"""test_peak_memory_usage.py""" ) lowercase : List[Any] = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): lowercase : Dict = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, strategy in enumerate(snake_case ): if strategy.lower() in spec: 
cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--peak_memory_upper_bound={peak_mem_upper_bound}", f"--n_train={self.n_train}", f"--n_val={self.n_val}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case ,env=os.environ.copy() )
20
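# The FSDP tests above never touch a config file: accelerate reads its FSDP plugin
# settings from environment variables, which the tests mock per case. A standalone
# illustration of that pattern; variable names beyond those visible in dist_env above
# follow accelerate's launcher and are assumptions:
import os
from unittest import mock

fsdp_env = {
    "ACCELERATE_USE_FSDP": "true",
    "FSDP_SHARDING_STRATEGY": "1",  # 1 == FULL_SHARD
    "MASTER_ADDR": "localhost",
    "MASTER_PORT": "10999",
    "RANK": "0",
    "LOCAL_RANK": "0",
    "WORLD_SIZE": "1",
}
with mock.patch.dict(os.environ, fsdp_env):
    pass  # constructing FullyShardedDataParallelPlugin() here would pick these up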
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase_ ( _snake_case ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" ) SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" ) SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" ) SCREAMING_SNAKE_CASE__ : Tuple = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE__ : Optional[Any] = value return upgrade @torch.no_grad() def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ): if config_path is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case ) else: SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig() SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval() SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case ) if os.path.exists(_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ) SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case ) hf_model.load_state_dict(_snake_case ) SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict() SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case ) SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case ) assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = 
argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
25
0
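# The sanity check in the conversion above compares a *sum over parameter values*
# rather than a raw parameter count, skipping the double-copied encoder embeddings.
# A simplified standalone version of that idea (illustrative, not from the source):
import torch


def param_sum(state_dict: dict, skip: tuple = ("encoder.embeddings",)) -> torch.Tensor:
    # Keys containing any `skip` substring are excluded, mirroring the exclusion in
    # the count_parameters helper above.
    return sum(p.float().sum() for k, p in state_dict.items() if not any(s in k for s in skip))


sd = {"encoder.embeddings.w": torch.ones(2), "head.w": torch.ones(3)}
assert param_sum(sd).item() == 3.0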
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE : str = { "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Tuple = ["LlamaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[Any] = ["LlamaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel", "LlamaForSequenceClassification", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
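# The optional-backend guard used throughout the __init__ above, in miniature: probe
# for a dependency and only extend the lazy import structure when it is present, so a
# missing backend degrades to absent symbols instead of an import-time crash:
from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

_import_structure = {"configuration_llama": ["LlamaConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: the modeling symbols simply never get registered
else:
    _import_structure["modeling_llama"] = ["LlamaModel", "LlamaForCausalLM"]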
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip' def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,) if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) ) else: SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0] print(our_output.shape ,their_output.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) UpperCAmelCase__ : Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
25
0
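# The verification step in the conversion above reduces to this idiom: report the
# worst-case elementwise deviation, then gate on an absolute tolerance (the tensors
# here are random stand-ins for the two models' outputs):
import torch

ours = torch.randn(1, 8)
theirs = ours + 1e-5 * torch.randn_like(ours)
max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
assert torch.allclose(ours, theirs, atol=1e-3), f"max_absolute_diff = {max_absolute_diff}"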
'''simple docstring'''

from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
22
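# Usage sketch for get_transitions above; the walk is stochastic, so only rough
# proportions can be checked. With these probabilities the stationary distribution
# puts 5x as much mass on "a" as on "b" (pi_a = 5/6, pi_b = 1/6):
visits = get_transitions(
    "a",
    [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)],
    10_000,
)
assert visits["a"] > visits["b"]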
"""simple docstring""" UpperCAmelCase__ : List[str] = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] UpperCAmelCase__ : Tuple = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] UpperCAmelCase__ : Union[str, Any] = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] UpperCAmelCase__ : str = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] UpperCAmelCase__ : str = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
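The integer lists above read as precomputed denoising-timestep schedules of the kind diffusers ships for DeepFloyd IF's fast/smart sampling presets: each runs strictly downward from 999 to 0. A minimal sanity check for such hand-written schedules, as a sketch (the helper name and the 1000-step training horizon are assumptions):

def validate_schedule(timesteps, num_train_timesteps=1000):
    # A usable schedule starts inside the training horizon, ends at 0,
    # and is strictly decreasing.
    assert timesteps[0] < num_train_timesteps
    assert timesteps[-1] == 0
    assert all(a > b for a, b in zip(timesteps, timesteps[1:]))

validate_schedule([999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311,
                   288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66,
                   44, 22, 0])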
25
0
'''simple docstring''' import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename UpperCamelCase__: Union[str, Any] = "http://www.mocksite.com/file1.txt" UpperCamelCase__: Tuple = "\"text\": [\"foo\", \"foo\"]" UpperCamelCase__: Dict = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8" class SCREAMING_SNAKE_CASE: """simple docstring""" lowerCamelCase__ = 200 lowerCamelCase__ = {"""Content-Length""": """100"""} lowerCamelCase__ = {} def A ( self : Tuple , **__snake_case : Dict ) -> List[Any]: return [bytes(__snake_case , '''utf-8''' )] def snake_case_ ( *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : str ) -> List[str]: return MockResponse() @pytest.mark.parametrize('''urls_type''' , [str, list, dict] ) def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Dict ) -> str: import requests monkeypatch.setattr(_lowerCAmelCase , '''request''' , _lowerCAmelCase ) UpperCAmelCase : Optional[Any] = URL if issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : List[Any] = url elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : int = [url] elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : str = {'''train''': url} UpperCAmelCase : Dict = '''dummy''' UpperCAmelCase : Optional[int] = '''downloads''' UpperCAmelCase : List[str] = tmp_path UpperCAmelCase : Optional[Any] = DownloadConfig( cache_dir=os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , use_etag=_lowerCAmelCase , ) UpperCAmelCase : Tuple = DownloadManager(dataset_name=_lowerCAmelCase , download_config=_lowerCAmelCase ) UpperCAmelCase : Any = dl_manager.download(_lowerCAmelCase ) UpperCAmelCase : Any = urls for downloaded_paths in [downloaded_paths]: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : Optional[Any] = [downloaded_paths] UpperCAmelCase : Any = [urls] elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): assert "train" in downloaded_paths.keys() UpperCAmelCase : Any = downloaded_paths.values() UpperCAmelCase : Tuple = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(_lowerCAmelCase , _lowerCAmelCase ): assert downloaded_path == dl_manager.downloaded_paths[input_url] UpperCAmelCase : List[str] = Path(_lowerCAmelCase ) UpperCAmelCase : List[Any] = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() UpperCAmelCase : List[Any] = downloaded_path.read_text() assert content == CONTENT UpperCAmelCase : Union[str, Any] = downloaded_path.with_suffix('''.json''' ) assert metadata_downloaded_path.exists() UpperCAmelCase : Any = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('''paths_type''' , [str, list, dict] ) def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> int: UpperCAmelCase : Optional[int] = str(_lowerCAmelCase ) if issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : Union[str, Any] = filename elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : int = [filename] elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : Union[str, Any] = {'''train''': filename} UpperCAmelCase : Optional[int] = '''dummy''' UpperCAmelCase : Union[str, Any] = xz_file.parent UpperCAmelCase : 
Tuple = '''extracted''' UpperCAmelCase : List[Any] = DownloadConfig( cache_dir=_lowerCAmelCase , use_etag=_lowerCAmelCase , ) UpperCAmelCase : Optional[Any] = DownloadManager(dataset_name=_lowerCAmelCase , download_config=_lowerCAmelCase ) UpperCAmelCase : List[Any] = dl_manager.extract(_lowerCAmelCase ) UpperCAmelCase : Union[str, Any] = paths for extracted_paths in [extracted_paths]: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : int = [extracted_paths] UpperCAmelCase : int = [paths] elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): assert "train" in extracted_paths.keys() UpperCAmelCase : Any = extracted_paths.values() UpperCAmelCase : Tuple = paths.values() assert extracted_paths for extracted_path, input_path in zip(_lowerCAmelCase , _lowerCAmelCase ): assert extracted_path == dl_manager.extracted_paths[input_path] UpperCAmelCase : Union[str, Any] = Path(_lowerCAmelCase ) UpperCAmelCase : List[Any] = extracted_path.parts assert parts[-1] == hash_url_to_filename(_lowerCAmelCase , etag=_lowerCAmelCase ) assert parts[-2] == extracted_subdir assert extracted_path.exists() UpperCAmelCase : Union[str, Any] = extracted_path.read_text() UpperCAmelCase : List[str] = text_file.read_text() assert extracted_file_content == expected_file_content def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> str: assert path.endswith('''.jsonl''' ) for num_items, line in enumerate(_lowerCAmelCase , start=1 ): UpperCAmelCase : str = json.loads(line.decode('''utf-8''' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] ) def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str ) -> Optional[int]: UpperCAmelCase : Optional[Any] = request.getfixturevalue(_lowerCAmelCase ) UpperCAmelCase : Any = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCAmelCase ) , start=1 ): _test_jsonl(_lowerCAmelCase , _lowerCAmelCase ) assert num_jsonl == 2 @pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] ) def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ) -> Tuple: UpperCAmelCase : int = request.getfixturevalue(_lowerCAmelCase ) UpperCAmelCase : str = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCAmelCase ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCAmelCase ) , start=1 ): _test_jsonl(_lowerCAmelCase , _lowerCAmelCase ) assert num_tar == 1 assert num_jsonl == 2 def snake_case_ ( _lowerCAmelCase : List[Any] ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(_lowerCAmelCase ) , start=1 ): assert os.path.basename(_lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
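The two tests above drive the `datasets` library's DownloadManager through its str/list/dict input shapes for both download and extract. A minimal, hedged sketch of the same API outside the test harness (the URL is a placeholder):

from datasets.download.download_manager import DownloadManager

dl_manager = DownloadManager(dataset_name="dummy")
local_path = dl_manager.download("https://example.com/data.tar.gz")  # cached download
extracted_dir = dl_manager.extract(local_path)                       # unpacks the archive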
23
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase__ : List[str] = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCAmelCase__ : List[Any] = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = SavedModel() SCREAMING_SNAKE_CASE__ : Dict = [] with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f: SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(_snake_case )] ) with open(_snake_case ,"""rb""" ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE__ : List[str] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(_snake_case ) if strict and len(_snake_case ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(_snake_case ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*_snake_case ,sep="""\n""" ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) UpperCAmelCase__ : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
25
0
import pprint import requests snake_case_ = 'https://zenquotes.io/api' def lowerCamelCase__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def lowerCamelCase__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": snake_case_ = random_quotes() pprint.pprint(response)
24
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) UpperCAmelCase__ : List[Any] = logging.getLogger() def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case ) Path(_snake_case ).open("""w""" ).writelines(_snake_case ) UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random' UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart' UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowerCAmelCase_ (a__ ): """simple docstring""" def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""] _dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : Optional[Any] = F''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): run_generate() assert Path(SCREAMING_SNAKE_CASE__ ).exists() # os.remove(Path(output_file_name)) def __magic_name__ (self ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : Any = { """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""], """de""": [ """Maschinelles Lernen ist großartig, oder?""", """Ich esse gerne Bananen""", """Morgen ist wieder ein toller Tag!""", ], } SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] ) SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : List[Any] = F''' run_eval_search.py {model} 
{str(SCREAMING_SNAKE_CASE__ )} {str(SCREAMING_SNAKE_CASE__ )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] ) with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): with CaptureStdout() as cs: run_search() SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""] SCREAMING_SNAKE_CASE__ : Any = ["""Info"""] if "translation" in task: expected_strings.append("""bleu""" ) else: expected_strings.extend(SCREAMING_SNAKE_CASE__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(SCREAMING_SNAKE_CASE__ ).exists() os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
25
0
from manim import * class lowercase ( UpperCamelCase__ ): def a__ ( self ) -> Dict: _A : List[Any] = Rectangle(height=0.5 , width=0.5 ) _A : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A : Optional[Any] = [mem.copy() for i in range(6 )] _A : List[Any] = [mem.copy() for i in range(6 )] _A : Optional[Any] = VGroup(*_a ).arrange(_a , buff=0 ) _A : Any = VGroup(*_a ).arrange(_a , buff=0 ) _A : Optional[int] = VGroup(_a , _a ).arrange(_a , buff=0 ) _A : Optional[Any] = Text("""CPU""" , font_size=24 ) _A : List[str] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_a ) _A : Optional[Any] = [mem.copy() for i in range(4 )] _A : Optional[int] = VGroup(*_a ).arrange(_a , buff=0 ) _A : Optional[Any] = Text("""GPU""" , font_size=24 ) _A : Union[str, Any] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) gpu.move_to([-1, -1, 0] ) self.add(_a ) _A : str = [mem.copy() for i in range(6 )] _A : int = VGroup(*_a ).arrange(_a , buff=0 ) _A : str = Text("""Model""" , font_size=24 ) _A : List[Any] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) model.move_to([3, -1.0, 0] ) self.add(_a ) _A : List[str] = [] for i, rect in enumerate(_a ): rect.set_stroke(_a ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) _A : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=_a , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=_a , buff=0.0 ) self.add(_a ) cpu_targs.append(_a ) _A : Union[str, Any] = [mem.copy() for i in range(6 )] _A : str = VGroup(*_a ).arrange(_a , buff=0 ) _A : List[str] = Text("""Loaded Checkpoint""" , font_size=24 ) _A : Optional[int] = Group(_a , _a ).arrange(_a , aligned_edge=_a , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) _A : List[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A : int = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_a , _a ) _A : List[str] = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() ) _A : Optional[int] = MarkupText( F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_a ) , Write(_a ) ) self.play(Write(_a , run_time=1 ) , Create(_a , run_time=1 ) ) _A : Optional[int] = [] _A : Dict = [] for i, rect in enumerate(_a ): _A : int = fill.copy().set_fill(_a , opacity=0.7 ) target.move_to(_a ) first_animations.append(GrowFromCenter(_a , run_time=1 ) ) _A : Optional[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(_a , run_time=1.5 ) ) self.play(*_a ) self.play(*_a ) self.wait()
26
"""simple docstring""" UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}] UpperCAmelCase__ : Optional[int] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
25
0
'''simple docstring''' __lowercase : dict[tuple[int, int, int], int] = {} def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on __a : List[str] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one __a : Union[str, Any] = _calculate(days - 1 , _SCREAMING_SNAKE_CASE , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 __a : Optional[Any] = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter __a : Tuple = _calculate(days - 1 , _SCREAMING_SNAKE_CASE , 0 ) __a : Dict = state_late + state_absent + state_ontime __a : str = prizestrings return prizestrings def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 30 ): return _calculate(_SCREAMING_SNAKE_CASE , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
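The memoised recursion above is Project Euler 191, whose statement gives exactly 43 prize strings over a four-day period. A brute force over the alphabet {O, L, A} confirms the same count (this cross-check is an illustration, not part of the row):

from itertools import product

# Count 4-day strings with fewer than 2 absences and no 3 consecutive lates
count = sum(
    1
    for s in product("OLA", repeat=4)
    if s.count("A") < 2 and "LLL" not in "".join(s)
)
print(count)  # 43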
27
"""simple docstring""" def lowercase_ ( _snake_case ): if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_snake_case ,_snake_case ): raise TypeError("""Input value must be a 'int' type""" ) return bin(_snake_case ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
25
0
'''simple docstring''' def __lowerCamelCase ( A__ = 1_000_000 ) -> int: """simple docstring""" UpperCamelCase = 1 UpperCamelCase = 1 UpperCamelCase = {1: 1} for inputa in range(2 , A__ ): UpperCamelCase = 0 UpperCamelCase = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: UpperCamelCase = (3 * number) + 1 counter += 1 if inputa not in counters: UpperCamelCase = counter if counter > pre_counter: UpperCamelCase = inputa UpperCamelCase = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
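The snippet above is the classic longest-Collatz-chain search (Project Euler 14). A quick cross-check of the chain-length logic against the problem's standard example, the chain for 13, which has 10 terms (the helper name is an assumption):

def chain_length(n):
    count = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        count += 1
    return count

assert chain_length(13) == 10  # 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1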
28
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCAmelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a__ ) class lowerCAmelCase_ (a__ ): """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) requires_backends(self , """vision""" ) self.check_model_type(SCREAMING_SNAKE_CASE__ ) def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" return {}, {}, {} def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = image.size SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework ) return model_inputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ ) return model_outputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy() SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" ) SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = {} SCREAMING_SNAKE_CASE__ : Any = predicted_depth SCREAMING_SNAKE_CASE__ : Dict = depth return output_dict
25
0
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None: warnings.warn( 'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use CLIPImageProcessor instead.' , _UpperCamelCase , ) super().__init__(*_UpperCamelCase , **_UpperCamelCase )
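The shim above exists only to warn and delegate, so new code should target the replacement class directly; a minimal sketch (the checkpoint name is an assumption):

from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")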
29
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = IFPipeline __UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" return self._get_dummy_components() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]: """simple docstring""" if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def __magic_name__ (self ) -> List[str]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_local() def __magic_name__ (self ) -> Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ (self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : int = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) 
).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowercase_ ( ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
25
0
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def a ( snake_case__: Dict , snake_case__: str , snake_case__: List[str] ): '''simple docstring''' lowercase_ = 1.5 lowercase_ = int(factor * num_class_images ) lowercase_ = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=snake_case__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: lowercase_ = client.query(text=snake_case__ ) if len(snake_case__ ) >= factor * num_class_images or num_images > 1e4: break else: lowercase_ = int(factor * num_images ) lowercase_ = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 , ) lowercase_ = 0 lowercase_ = 0 lowercase_ = tqdm(desc='''downloading real regularization images''' , total=snake_case__ ) with open(F'''{class_data_dir}/caption.txt''' , '''w''' ) as fa, open(F'''{class_data_dir}/urls.txt''' , '''w''' ) as fa, open( F'''{class_data_dir}/images.txt''' , '''w''' ) as fa: while total < num_class_images: lowercase_ = class_images[count] count += 1 try: lowercase_ = requests.get(images['''url'''] ) if img.status_code == 200: lowercase_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser('''''' , add_help=snake_case__ ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=snake_case__ , type=snake_case__ ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=snake_case__ , type=snake_case__ ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=snake_case__ ) return parser.parse_args() if __name__ == "__main__": __a = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
30
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) SCREAMING_SNAKE_CASE__ : int = Accelerator() SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ ) try: pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
25
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : List[Any] = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
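The module above wires framework-specific classes into a lazily imported package, so that, for example, TensorFlow is only imported when TFVisionEncoderDecoderModel is actually touched. A simplified sketch of the underlying pattern (this is not the real _LazyModule, just the idea):

import importlib

class LazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        self._import_structure = import_structure

    def __getattr__(self, name):
        # Import the owning submodule only on first attribute access
        for submodule, exported in self._import_structure.items():
            if name in exported:
                module = importlib.import_module(f".{submodule}", self._package)
                return getattr(module, name)
        raise AttributeError(name)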
31
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) SCREAMING_SNAKE_CASE__ : int = [] # custom device map if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_snake_case ) # compatibility with peft SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) # convert param to the right dtype SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" ) SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_snake_case ): param.to(_snake_case ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers( _snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map( _snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE__ : Tuple = True SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,) return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ): if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_snake_case ,_snake_case ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE__ : int = get_balanced_memory( _snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,) SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case ) if isinstance(_snake_case ,_snake_case ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE__ : Optional[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ): if modules_to_not_convert is None: SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,): SCREAMING_SNAKE_CASE__ : Tuple = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE__ : Any = [] current_key_name.append(_snake_case ) if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE__ : List[str] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt( module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit( module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) SCREAMING_SNAKE_CASE__ : str = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data bnb_module.requires_grad_(_snake_case ) setattr(_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowercase_ ( _snake_case ): # Create a copy of the model with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case ) # For compatibility with Accelerate < 0.18 if isinstance(_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] ) SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE__ : Optional[int] = False if hasattr(_snake_case 
,"""base_model_prefix""" ): SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" ) filtered_module_names.append(_snake_case ) return filtered_module_names def lowercase_ ( _snake_case ): for m in model.modules(): if isinstance(_snake_case ,bnb.nn.Linearabit ): return True return False def lowercase_ ( _snake_case ): return next(parameter.parameters() ).device def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case ) SCREAMING_SNAKE_CASE__ : str = param_name SCREAMING_SNAKE_CASE__ : Dict = model if "." in tensor_name: SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" ) for split in splits[:-1]: SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1] # offload weights SCREAMING_SNAKE_CASE__ : List[Any] = False offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case ) if hasattr(module._parameters[tensor_name] ,"""SCB""" ): offload_weight( module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,) else: offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case ) offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ) set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
25
0
import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Any ) -> str: a_ : Any = 'laion/clap-htsat-unfused' a_ : Tuple = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]: return RobertaTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> int: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: a_ : Optional[int] = self.get_tokenizer() a_ : int = self.get_feature_extractor() a_ : Union[str, Any] = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(self.tmpdirname ) a_ : Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: a_ : Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) a_ : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) a_ : int = self.get_feature_extractor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) a_ : Optional[Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: a_ : Union[str, Any] = self.get_feature_extractor() a_ : Union[str, Any] = self.get_tokenizer() a_ : Optional[Any] = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) a_ : str = floats_list((3, 1_0_0_0) ) a_ : Any = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors='np' ) a_ : List[Any] = processor(audios=SCREAMING_SNAKE_CASE__ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: a_ : Union[str, Any] = self.get_feature_extractor() a_ : Dict = self.get_tokenizer() a_ : Tuple = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = 'This is a test string' a_ : List[str] = processor(text=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): 
self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Any: a_ : Tuple = self.get_feature_extractor() a_ : Any = self.get_tokenizer() a_ : Dict = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a_ : List[str] = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: a_ : Dict = self.get_feature_extractor() a_ : Optional[Any] = self.get_tokenizer() a_ : Optional[int] = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
32
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )): raise ValueError("""longest_common_substring() takes two strings for inputs""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case ) SCREAMING_SNAKE_CASE__ : int = len(_snake_case ) SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 for i in range(1 ,texta_length + 1 ): for j in range(1 ,texta_length + 1 ): if texta[i - 1] == texta[j - 1]: SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: SCREAMING_SNAKE_CASE__ : List[Any] = i SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
25
0
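# A short usage sketch for the longest-common-substring routine repaired above
# (an added check, not part of the original sample; expected strings verified by hand):
assert longest_common_substring("abcdef", "xabded") == "ab"
assert longest_common_substring("", "abc") == ""
assert longest_common_substring("zxabcdezy", "yzabcdezx") == "abcdez"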
"""simple docstring""" from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def lowercase ( __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : int , __snake_case : int ): lowercase_ : Optional[int] = cva.getAffineTransform(__snake_case , __snake_case ) return cva.warpAffine(__snake_case , __snake_case , (rows, cols) ) if __name__ == "__main__": # read original image __A : Tuple = cva.imread( str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''') ) # turn image in gray scale value __A : int = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape __A , __A : Union[str, Any] = gray_img.shape # set different points to rotate image __A : Optional[Any] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) __A : int = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) __A : List[Any] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) __A : str = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list __A : Union[str, Any] = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations __A : Optional[int] = plt.figure(1) __A : int = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3'''] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''') plt.title(titles[i]) plt.axis('''off''') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
33
"""simple docstring""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ): SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """""" # apply OCR SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""] # filter empty words and corresponding coordinates SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()] SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format SCREAMING_SNAKE_CASE__ : List[Any] = [] for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h] actual_boxes.append(_snake_case ) # finally, normalize the bounding boxes SCREAMING_SNAKE_CASE__ : List[str] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) ) assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = ['''pixel_values'''] def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24} SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize 
SCREAMING_SNAKE_CASE__ : Any = size SCREAMING_SNAKE_CASE__ : List[Any] = resample SCREAMING_SNAKE_CASE__ : Dict = apply_ocr SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""]) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if apply_ocr: requires_backends(self , """pytesseract""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] SCREAMING_SNAKE_CASE__ : Dict = [] for image in images: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) words_batch.append(SCREAMING_SNAKE_CASE__ ) boxes_batch.append(SCREAMING_SNAKE_CASE__ ) if do_resize: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ ) if apply_ocr: SCREAMING_SNAKE_CASE__ : List[Any] = words_batch SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch return data
25
0
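# As a sanity check on the affine warp used in the rotation sample above, this
# minimal sketch (assuming only OpenCV and NumPy, not part of the original sample)
# confirms that cv2.getAffineTransform returns the 2x3 matrix mapping each source
# point onto its destination:
import cv2
import numpy as np

pts_src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
pts_dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
m = cv2.getAffineTransform(pts_src, pts_dst)  # 2x3 affine matrix

# Append a homogeneous coordinate and check the point mapping.
src_h = np.hstack([pts_src, np.ones((3, 1), np.float32)])
assert np.allclose(src_h @ m.T, pts_dst, atol=1e-4)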
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
34
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:] SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() SCREAMING_SNAKE_CASE__ : int = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB ) # Add 0 to make lengths equal a power of 2 SCREAMING_SNAKE_CASE__ : Optional[int] = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE__ ) <= 1: return dft[0] # SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2 while next_ncol > 0: SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol # First half of next step SCREAMING_SNAKE_CASE__ : str = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step SCREAMING_SNAKE_CASE__ : int = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2 return dft[0] def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" ) SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" ) SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 while next_ncol <= self.c_max_length: SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2) SCREAMING_SNAKE_CASE__ : Any = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c next_ncol *= 2 # Unpack SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__(self ) -> List[str]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
25
0
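# For reference, a hypothetical command-line invocation of the LXMERT conversion
# script above (the script name and all paths are placeholders, not from the original):
# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/lxmert/model.ckpt \
#     --config_file /path/to/lxmert/config.json \
#     --pytorch_dump_path /path/to/output/pytorch_model.bin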
'''simple docstring''' import torch from torch import nn class UpperCAmelCase_ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any]=1 , snake_case_ : Tuple=False ): super().__init__() snake_case__ : List[str] = n_token snake_case__ : Optional[int] = d_embed snake_case__ : Any = d_proj snake_case__ : Any = cutoffs + [n_token] snake_case__ : Optional[int] = [0] + self.cutoffs snake_case__ : Union[str, Any] = div_val snake_case__ : Optional[Any] = self.cutoffs[0] snake_case__ : int = len(self.cutoffs ) - 1 snake_case__ : int = self.shortlist_size + self.n_clusters if self.n_clusters > 0: snake_case__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) snake_case__ : List[str] = nn.Parameter(torch.zeros(self.n_clusters ) ) snake_case__ : Any = nn.ModuleList() snake_case__ : List[str] = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case_ , snake_case_ ) ) ) else: self.out_projs.append(snake_case_ ) self.out_layers.append(nn.Linear(snake_case_ , snake_case_ ) ) else: for i in range(len(self.cutoffs ) ): snake_case__ , snake_case__ : int = self.cutoff_ends[i], self.cutoff_ends[i + 1] snake_case__ : List[Any] = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case_ , snake_case_ ) ) ) self.out_layers.append(nn.Linear(snake_case_ , r_idx - l_idx ) ) snake_case__ : int = keep_order def lowerCamelCase ( self : Dict , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : List[str] ): if proj is None: snake_case__ : List[str] = nn.functional.linear(snake_case_ , snake_case_ , bias=snake_case_ ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: snake_case__ : int = nn.functional.linear(snake_case_ , proj.t().contiguous() ) snake_case__ : Optional[Any] = nn.functional.linear(snake_case_ , snake_case_ , bias=snake_case_ ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def lowerCamelCase ( self : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Tuple=None , snake_case_ : str=False ): if labels is not None: # Shift so that tokens < n predict n snake_case__ : Dict = hidden[..., :-1, :].contiguous() snake_case__ : Any = labels[..., 1:].contiguous() snake_case__ : Tuple = hidden.view(-1 , hidden.size(-1 ) ) snake_case__ : Optional[Any] = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" ) else: snake_case__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: snake_case__ : str = self._compute_logit(snake_case_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: snake_case__ : Dict = labels != -100 snake_case__ : Any = torch.zeros_like(snake_case_ , dtype=hidden.dtype , device=hidden.device ) snake_case__ : int = ( -nn.functional.log_softmax(snake_case_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: snake_case__ : str = nn.functional.log_softmax(snake_case_ , dim=-1 ) else: # construct weights and biases snake_case__ , snake_case__ : Optional[Any] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: snake_case__ , snake_case__ : str = 
self.cutoff_ends[i], self.cutoff_ends[i + 1] snake_case__ : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx] snake_case__ : int = self.out_layers[0].bias[l_idx:r_idx] else: snake_case__ : Any = self.out_layers[i].weight snake_case__ : Optional[int] = self.out_layers[i].bias if i == 0: snake_case__ : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 ) snake_case__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(snake_case_ ) biases.append(snake_case_ ) snake_case__ , snake_case__ , snake_case__ : Any = weights[0], biases[0], self.out_projs[0] snake_case__ : Dict = self._compute_logit(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) snake_case__ : List[str] = nn.functional.log_softmax(snake_case_ , dim=1 ) if labels is None: snake_case__ : Union[str, Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: snake_case__ : Optional[int] = torch.zeros_like(snake_case_ , dtype=hidden.dtype , device=hidden.device ) snake_case__ : List[str] = 0 snake_case__ : Dict = [0] + self.cutoffs for i in range(len(snake_case_ ) - 1 ): snake_case__ , snake_case__ : int = cutoff_values[i], cutoff_values[i + 1] if labels is not None: snake_case__ : str = (labels >= l_idx) & (labels < r_idx) snake_case__ : Union[str, Any] = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue snake_case__ : List[Any] = labels.index_select(0 , snake_case_ ) - l_idx snake_case__ : Dict = head_logprob.index_select(0 , snake_case_ ) snake_case__ : List[str] = hidden.index_select(0 , snake_case_ ) else: snake_case__ : List[str] = hidden if i == 0: if labels is not None: snake_case__ : str = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: snake_case__ : Tuple = head_logprob[:, : self.cutoffs[0]] else: snake_case__ , snake_case__ , snake_case__ : Dict = weights[i], biases[i], self.out_projs[i] snake_case__ : Optional[Any] = self._compute_logit(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) snake_case__ : Union[str, Any] = nn.functional.log_softmax(snake_case_ , dim=1 ) snake_case__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: snake_case__ : str = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: snake_case__ : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i snake_case__ : List[Any] = logprob_i if labels is not None: if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order: out.index_copy_(0 , snake_case_ , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Dict ): if self.n_clusters == 0: snake_case__ : int = self._compute_logit(snake_case_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(snake_case_ , dim=-1 ) else: # construct weights and biases snake_case__ , snake_case__ : Union[str, Any] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: snake_case__ , snake_case__ : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1] snake_case__ : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx] snake_case__ : int = self.out_layers[0].bias[l_idx:r_idx] else: snake_case__ : List[Any] = self.out_layers[i].weight snake_case__ : List[str] = self.out_layers[i].bias if i == 0: snake_case__ : Union[str, Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 ) snake_case__ : Tuple = 
torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(snake_case_ ) biases.append(snake_case_ ) snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = weights[0], biases[0], self.out_projs[0] snake_case__ : List[str] = self._compute_logit(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) snake_case__ : int = hidden.new_empty((head_logit.size(0 ), self.n_token) ) snake_case__ : Optional[Any] = nn.functional.log_softmax(snake_case_ , dim=1 ) snake_case__ : Tuple = [0] + self.cutoffs for i in range(len(snake_case_ ) - 1 ): snake_case__ , snake_case__ : Optional[int] = cutoff_values[i], cutoff_values[i + 1] if i == 0: snake_case__ : int = head_logprob[:, : self.cutoffs[0]] else: snake_case__ , snake_case__ , snake_case__ : Optional[int] = weights[i], biases[i], self.out_projs[i] snake_case__ : str = self._compute_logit(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) snake_case__ : Optional[int] = nn.functional.log_softmax(snake_case_ , dim=1 ) snake_case__ : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i snake_case__ : int = logprob_i return out
35
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" ,type=_snake_case ,help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) ,) # rest from the training program parser.add_argument("""training_script_args""" ,nargs=_snake_case ) return parser.parse_args() def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : int = parse_args() # Import training_script as a module. SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) SCREAMING_SNAKE_CASE__ : int = script_fpath.stem SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case ) # Patch sys.argv SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores ) if __name__ == "__main__": main()
25
0
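# A hypothetical invocation of the TPU launcher above (the launcher's file name is
# assumed; the launched module must define an `_mp_fn(index)` entry point for xmp.spawn):
# python xla_spawn.py --num_cores 8 my_training_script.py --arg1 --arg2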
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _snake_case = logging.getLogger(__name__) class UpperCAmelCase_ ( a): def __init__( self, __a=-1): '''simple docstring''' _lowerCAmelCase : Optional[int] = label_idx def snake_case__ ( self, __a, __a): '''simple docstring''' if isinstance(__a, __a): _lowerCAmelCase : Dict = mode.value _lowerCAmelCase : Optional[int] = os.path.join(__a, f"{mode}.txt") _lowerCAmelCase : Optional[int] = 1 _lowerCAmelCase : str = [] with open(__a, encoding="utf-8") as f: _lowerCAmelCase : Any = [] _lowerCAmelCase : int = [] for line in f: if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a)) guid_index += 1 _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : Any = [] else: _lowerCAmelCase : int = line.split(" ") words.append(splits[0]) if len(__a) > 1: labels.append(splits[self.label_idx].replace("\n", "")) else: # Examples could have no label for mode = "test" labels.append("O") if words: examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a)) return examples def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : Any = 0 for line in test_input_reader: if line.startswith("-DOCSTART-") or line == "" or line == "\n": writer.write(__a) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: _lowerCAmelCase : int = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n" writer.write(__a) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0]) def snake_case__ ( self, __a): '''simple docstring''' if path: with open(__a, "r") as f: _lowerCAmelCase : Dict = f.read().splitlines() if "O" not in labels: _lowerCAmelCase : List[Any] = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class UpperCAmelCase_ ( a): def __init__( self): '''simple docstring''' super().__init__(label_idx=-2) def snake_case__ ( self, __a): '''simple docstring''' if path: with open(__a, "r") as f: _lowerCAmelCase : Any = f.read().splitlines() if "O" not in labels: _lowerCAmelCase : Optional[Any] = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class UpperCAmelCase_ ( a): def snake_case__ ( self, __a, __a): '''simple docstring''' if isinstance(__a, __a): _lowerCAmelCase : int = mode.value _lowerCAmelCase : List[str] = os.path.join(__a, f"{mode}.txt") _lowerCAmelCase : Dict = 1 _lowerCAmelCase : Optional[int] = [] with open(__a, encoding="utf-8") as f: for sentence in parse_incr(__a): _lowerCAmelCase : List[str] = [] _lowerCAmelCase : Union[str, Any] = [] for token in sentence: words.append(token["form"]) labels.append(token["upos"]) assert len(__a) == len(__a) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a)) guid_index += 1 return examples def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : List[Any] = 0 for sentence in parse_incr(__a): _lowerCAmelCase : List[Any] = preds_list[example_id] _lowerCAmelCase : Union[str, Any] = "" for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) " out += "\n" writer.write(__a) 
example_id += 1 def snake_case__ ( self, __a): '''simple docstring''' if path: with open(__a, "r") as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
36
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): return 1 if input_a == input_a else 0 def lowercase_ ( ): assert xnor_gate(0 ,0 ) == 1 assert xnor_gate(0 ,1 ) == 0 assert xnor_gate(1 ,0 ) == 0 assert xnor_gate(1 ,1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
25
0
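# A tiny cross-check relating the repaired xnor_gate above to Python's bitwise XOR
# (an added sketch, not part of the original sample):
for a in (0, 1):
    for b in (0, 1):
        assert xnor_gate(a, b) == 1 - (a ^ b)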
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=8 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=16 ,__UpperCAmelCase=5 ,__UpperCAmelCase=2 ,__UpperCAmelCase=36 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=512 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,) -> Dict: lowerCAmelCase__ : Dict = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Optional[int] = seq_length lowerCAmelCase__ : Any = is_training lowerCAmelCase__ : str = use_input_mask lowerCAmelCase__ : Any = use_token_type_ids lowerCAmelCase__ : Union[str, Any] = use_labels lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : str = hidden_size lowerCAmelCase__ : Tuple = num_hidden_layers lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : Union[str, Any] = intermediate_size lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : int = attention_probs_dropout_prob lowerCAmelCase__ : Dict = max_position_embeddings lowerCAmelCase__ : Optional[int] = type_vocab_size lowerCAmelCase__ : Optional[int] = type_sequence_label_size lowerCAmelCase__ : int = initializer_range lowerCAmelCase__ : Dict = num_labels lowerCAmelCase__ : List[str] = num_choices lowerCAmelCase__ : str = scope def UpperCAmelCase_ ( self ) -> int: lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCAmelCase__ : Union[str, Any] = None if self.use_input_mask: lowerCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Tuple = None if self.use_token_type_ids: lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowerCAmelCase__ : int = None lowerCAmelCase__ : str = None lowerCAmelCase__ : int = None if self.use_labels: lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowerCAmelCase__ : Any = ids_tensor([self.batch_size] ,self.num_choices ) lowerCAmelCase__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self ) -> int: return MraConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings 
,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCAmelCase ,initializer_range=self.initializer_range ,) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : Dict = self.get_config() lowerCAmelCase__ : Union[str, Any] = 300 return config def UpperCAmelCase_ ( self ) -> Optional[int]: ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : int = self.prepare_config_and_inputs() lowerCAmelCase__ : int = True lowerCAmelCase__ : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Optional[int] = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> int: lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : Union[str, Any] = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Optional[Any] = model( __UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,encoder_attention_mask=__UpperCAmelCase ,) lowerCAmelCase__ : str = model( __UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,) lowerCAmelCase__ : Any = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> int: lowerCAmelCase__ : Tuple = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Any = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[Any] = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : int = model( __UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,start_positions=__UpperCAmelCase ,end_positions=__UpperCAmelCase ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) 
) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : Union[str, Any] = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : int = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : List[Any] = self.num_labels lowerCAmelCase__ : Optional[Any] = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Any = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]: lowerCAmelCase__ : Any = self.num_choices lowerCAmelCase__ : Optional[Any] = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowerCAmelCase__ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowerCAmelCase__ : Tuple = model( __UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ) -> List[str]: lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Optional[Any] = config_and_inputs lowerCAmelCase__ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Dict = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __lowercase : str = False __lowercase : Union[str, Any] = False __lowercase : Optional[Any] = False __lowercase : int = False __lowercase : int = () def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : List[str] = MraModelTester(self ) lowerCAmelCase__ : Dict = ConfigTester(self ,config_class=__UpperCAmelCase ,hidden_size=37 ) def UpperCAmelCase_ ( self ) -> Tuple: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[Any]: lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : Dict = 
self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase__ : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Any = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""MRA does not output attentions""" ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: return @require_torch class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : str = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) lowerCAmelCase__ : List[Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): lowerCAmelCase__ : int = model(__UpperCAmelCase )[0] lowerCAmelCase__ : Optional[int] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape ,__UpperCAmelCase ) lowerCAmelCase__ : str = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCAmelCase ,atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Any = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) lowerCAmelCase__ : Tuple = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0] lowerCAmelCase__ : List[str] = 5_0265 lowerCAmelCase__ : int = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCAmelCase ,atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Union[str, Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) lowerCAmelCase__ : Optional[int] = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase )[0] lowerCAmelCase__ : Optional[Any] = 5_0265 lowerCAmelCase__ : Tuple = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCAmelCase 
,atol=1E-4 ) )
37
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib UpperCAmelCase__ : Optional[int] = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } UpperCAmelCase__ : List[Any] = logging.WARNING def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option DATASETS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ', '.join(log_levels.keys() ) }''' ) return _default_log_level def lowercase_ ( ): return __name__.split(""".""" )[0] def lowercase_ ( ): return logging.getLogger(_get_library_name() ) def lowercase_ ( ): # Apply our default configuration to the library root logger. SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def lowercase_ ( _snake_case = None ): if name is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name() return logging.getLogger(_snake_case ) def lowercase_ ( ): return _get_library_root_logger().getEffectiveLevel() def lowercase_ ( _snake_case ): _get_library_root_logger().setLevel(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Tuple = False def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : str = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowerCAmelCase_ : """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None def __iter__(self ) -> int: """simple docstring""" return iter(self._iterator ) def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument return return empty_fn def __enter__(self ) -> Dict: """simple docstring""" return self def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" return UpperCAmelCase__ : str = True class lowerCAmelCase_ : """simple docstring""" def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) else: return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" if _tqdm_active: return 
tqdm_lib.tqdm.get_lock() UpperCAmelCase__ : Tuple = _tqdm_cls() def lowercase_ ( ): global _tqdm_active return bool(_tqdm_active ) def lowercase_ ( ): global _tqdm_active SCREAMING_SNAKE_CASE__ : Union[str, Any] = True def lowercase_ ( ): global _tqdm_active SCREAMING_SNAKE_CASE__ : str = False
25
0
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters UpperCAmelCase_ : Optional[Any] = (7_20, 12_80) # Height, Width UpperCAmelCase_ : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it. UpperCAmelCase_ : int = 1 / 1_00 UpperCAmelCase_ : int = '''''' UpperCAmelCase_ : List[str] = '''''' UpperCAmelCase_ : Optional[Any] = '''''' UpperCAmelCase_ : str = 2_50 def SCREAMING_SNAKE_CASE_ ( ) -> None: """simple docstring""" UpperCamelCase , UpperCamelCase :int = get_dataset(__magic_name__ , __magic_name__ ) for index in range(__magic_name__ ): UpperCamelCase :int = random.sample(range(len(__magic_name__ ) ) , 4 ) UpperCamelCase , UpperCamelCase , UpperCamelCase :Dict = update_image_and_anno( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , filter_scale=__magic_name__ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCamelCase :List[Any] = random_chars(32 ) UpperCamelCase :Any = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] UpperCamelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(f"""{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) UpperCamelCase :List[str] = [] for anno in new_annos: UpperCamelCase :int = anno[3] - anno[1] UpperCamelCase :Optional[int] = anno[4] - anno[2] UpperCamelCase :List[Any] = anno[1] + width / 2 UpperCamelCase :Union[str, Any] = anno[2] + height / 2 UpperCamelCase :int = f"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(__magic_name__ ) with open(f"""{file_root}.txt""" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : str ) -> tuple[list, list]: """simple docstring""" UpperCamelCase :Tuple = [] UpperCamelCase :str = [] for label_file in glob.glob(os.path.join(__magic_name__ , """*.txt""" ) ): UpperCamelCase :Optional[Any] = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__magic_name__ ) as in_file: UpperCamelCase :Union[str, Any] = in_file.readlines() UpperCamelCase :Union[str, Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" ) UpperCamelCase :str = [] for obj_list in obj_lists: UpperCamelCase :Optional[int] = obj_list.rstrip("""\n""" ).split(""" """ ) UpperCamelCase :Optional[Any] = float(obj[1] ) - float(obj[3] ) / 2 UpperCamelCase :int = float(obj[2] ) - float(obj[4] ) / 2 UpperCamelCase :Optional[Any] = float(obj[1] ) + float(obj[3] ) / 2 UpperCamelCase :str = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__magic_name__ ) labels.append(__magic_name__ ) return img_paths, labels def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list , __magic_name__ : list , __magic_name__ : list[int] , __magic_name__ : tuple[int, int] , __magic_name__ : tuple[float, float] , __magic_name__ : float = 0.0 , ) -> tuple[list, list, str]: """simple docstring""" UpperCamelCase :List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) UpperCamelCase :Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) UpperCamelCase :Optional[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) UpperCamelCase :Dict = int(scale_x * output_size[1] ) UpperCamelCase :List[Any] = int(scale_y * output_size[0] ) UpperCamelCase :Any = [] UpperCamelCase 
:Optional[int] = [] for i, index in enumerate(__magic_name__ ): UpperCamelCase :Optional[int] = all_img_list[index] path_list.append(__magic_name__ ) UpperCamelCase :str = all_annos[index] UpperCamelCase :List[Any] = cva.imread(__magic_name__ ) if i == 0: # top-left UpperCamelCase :Union[str, Any] = cva.resize(__magic_name__ , (divid_point_x, divid_point_y) ) UpperCamelCase :Tuple = img for bbox in img_annos: UpperCamelCase :str = bbox[1] * scale_x UpperCamelCase :List[str] = bbox[2] * scale_y UpperCamelCase :Dict = bbox[3] * scale_x UpperCamelCase :List[Any] = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right UpperCamelCase :int = cva.resize(__magic_name__ , (output_size[1] - divid_point_x, divid_point_y) ) UpperCamelCase :Tuple = img for bbox in img_annos: UpperCamelCase :List[str] = scale_x + bbox[1] * (1 - scale_x) UpperCamelCase :str = bbox[2] * scale_y UpperCamelCase :Tuple = scale_x + bbox[3] * (1 - scale_x) UpperCamelCase :Tuple = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left UpperCamelCase :List[str] = cva.resize(__magic_name__ , (divid_point_x, output_size[0] - divid_point_y) ) UpperCamelCase :Optional[Any] = img for bbox in img_annos: UpperCamelCase :Tuple = bbox[1] * scale_x UpperCamelCase :Dict = scale_y + bbox[2] * (1 - scale_y) UpperCamelCase :List[Any] = bbox[3] * scale_x UpperCamelCase :int = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right UpperCamelCase :List[str] = cva.resize( __magic_name__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) UpperCamelCase :Any = img for bbox in img_annos: UpperCamelCase :Any = scale_x + bbox[1] * (1 - scale_x) UpperCamelCase :List[Any] = scale_y + bbox[2] * (1 - scale_y) UpperCamelCase :List[Any] = scale_x + bbox[3] * (1 - scale_x) UpperCamelCase :List[str] = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: UpperCamelCase :List[str] = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" UpperCamelCase :int = ascii_lowercase + digits return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) ) if __name__ == "__main__": main() print('''DONE ✅''')
38
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : Optional[int] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : int = '''yolos''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE__ : int = num_hidden_layers SCREAMING_SNAKE_CASE__ : str = num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[str] = image_size SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = num_channels SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss # Hungarian matcher SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient SCREAMING_SNAKE_CASE__ : int = eos_coefficient class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Dict = version.parse('''1.11''' ) @property def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __magic_name__ (self ) -> float: """simple docstring""" return 1E-4 @property def __magic_name__ (self ) -> int: """simple docstring""" return 12
25
0
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to its representation in the given base (2-36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')

    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 are written as letters A-Z
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
39
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = 384 SCREAMING_SNAKE_CASE__ : Tuple = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE__ : int = 96 SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2) SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96 SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE__ : Tuple = 128 SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE__ : Optional[int] = 12 SCREAMING_SNAKE_CASE__ : Optional[int] = 512 elif "large" in model_name: SCREAMING_SNAKE_CASE__ : Optional[Any] = 192 SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48) SCREAMING_SNAKE_CASE__ : List[Any] = 12 SCREAMING_SNAKE_CASE__ : Optional[Any] = 768 # set label information SCREAMING_SNAKE_CASE__ : Optional[Any] = 150 SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files""" SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : str = SwinConfig( embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,) SCREAMING_SNAKE_CASE__ : int = UperNetConfig( backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,) return config def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = val def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :] # fmt: on def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape SCREAMING_SNAKE_CASE__ : List[Any] = 
x.reshape(_snake_case ,4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : int = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[ """state_dict""" ] for name, param in state_dict.items(): print(_snake_case ,param.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case ) if "bn" in key: SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" ) SCREAMING_SNAKE_CASE__ : Dict = val # rename keys SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case ,_snake_case ,_snake_case ) read_in_q_k_v(_snake_case ,config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case ) if "norm" in key: SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case ) model.load_state_dict(_snake_case ) # verify on image SCREAMING_SNAKE_CASE__ : List[str] = 
"""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor() SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits print(logits.shape ) print("""First values of logits:""" ,logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE__ : Dict = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("""Logits:""" ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-swin-tiny', type=str, choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']], help='Name of the Swin + UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase__ : List[str] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
25
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
"""simple docstring""" import math import unittest def lowercase_ ( _snake_case ): assert isinstance(_snake_case ,_snake_case ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , ) self.assertFalse( is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
25
0
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, """html.parser""")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("""div""", attrs={"""data-tn-component""": """organicJob"""}):
        job_title = job.find("""a""", attrs={"""data-tn-element""": """jobTitle"""}).text.strip()
        company_name = job.find("""span""", {"""class""": """company"""}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
        print(f'Job {i:>2} is {job[0]} at {job[1]}')
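# A small consumer sketch, added for illustration and not part of the original
# file: the generator above yields (job_title, company_name) pairs, so callers
# can cap how many postings are pulled with itertools.islice.
from itertools import islice

for title, company in islice(fetch_jobs("Mumbai"), 5):  # first five postings only
    print(f"{title} at {company}")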
41
"""simple docstring""" def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[int] = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0 SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2 SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3 SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5 for _ in range(1 ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case ) ugly_nums.append(_snake_case ) if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f"""{ugly_numbers(2_0_0) = }""")
25
0
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __UpperCAmelCase ( _lowerCamelCase ): __lowercase = """char""" __lowercase = """bpe""" __lowercase = """wp""" lowercase : List[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __UpperCAmelCase ( _lowerCamelCase ): __lowercase = ["""image_processor""", """char_tokenizer"""] __lowercase = """ViTImageProcessor""" __lowercase = """MgpstrTokenizer""" def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ): """simple docstring""" _snake_case = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCAmelCase_ , ) _snake_case = kwargs.pop('feature_extractor' ) _snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) _snake_case = tokenizer _snake_case = AutoTokenizer.from_pretrained('gpt2' ) _snake_case = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ): """simple docstring""" if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: _snake_case = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None: _snake_case = self.char_tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is None: return inputs elif images is None: return encodings else: _snake_case = encodings['input_ids'] return inputs def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" _snake_case , _snake_case , _snake_case = sequences _snake_case = char_preds.size(0 ) _snake_case , _snake_case = self._decode_helper(lowerCAmelCase_ , 'char' ) _snake_case , _snake_case = self._decode_helper(lowerCAmelCase_ , 'bpe' ) _snake_case , _snake_case = self._decode_helper(lowerCAmelCase_ , 'wp' ) _snake_case = [] _snake_case = [] for i in range(lowerCAmelCase_ ): _snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] _snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] _snake_case = scores.index(max(lowerCAmelCase_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _snake_case = {} _snake_case = final_strs _snake_case = final_scores _snake_case = char_strs _snake_case = bpe_strs _snake_case = wp_strs return out def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if format == DecodeType.CHARACTER: _snake_case = self.char_decode _snake_case = 1 _snake_case = '[s]' elif format == DecodeType.BPE: _snake_case = self.bpe_decode _snake_case = 2 _snake_case = '#' elif format == DecodeType.WORDPIECE: _snake_case = self.wp_decode _snake_case = 1_02 _snake_case = '[SEP]' else: raise ValueError(F'Format {format} is not supported.' 
) _snake_case , _snake_case = [], [] _snake_case = pred_logits.size(0 ) _snake_case = pred_logits.size(1 ) _snake_case , _snake_case = pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase_ , sorted=lowerCAmelCase_ ) _snake_case = preds_index.view(-1 , lowerCAmelCase_ )[:, 1:] _snake_case = decoder(lowerCAmelCase_ ) _snake_case , _snake_case = torch.nn.functional.softmax(lowerCAmelCase_ , dim=2 ).max(dim=2 ) _snake_case = preds_max_prob[:, 1:] for index in range(lowerCAmelCase_ ): _snake_case = preds_str[index].find(lowerCAmelCase_ ) _snake_case = preds_str[index][:pred_eos] _snake_case = preds_index[index].cpu().tolist() _snake_case = pred_index.index(lowerCAmelCase_ ) if eos_token in pred_index else -1 _snake_case = preds_max_prob[index][: pred_eos_index + 1] _snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCAmelCase_ ) conf_scores.append(lowerCAmelCase_ ) return dec_strs, conf_scores def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" _snake_case = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase_ )] return decode_strs def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" return self.bpe_tokenizer.batch_decode(lowerCAmelCase_ ) def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" _snake_case = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase_ )] return decode_strs
42
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase__ : Dict = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ : str = num_hidden_layers SCREAMING_SNAKE_CASE__ : int = num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : int = initializer_range SCREAMING_SNAKE_CASE__ : int = layer_norm_eps SCREAMING_SNAKE_CASE__ : Dict = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride SCREAMING_SNAKE_CASE__ : Any = time_stride SCREAMING_SNAKE_CASE__ : Optional[int] = max_length SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
25
0
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""Translation""", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value('''string''') for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""TranslationVariableLanguages""", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'''language''': pa.list_(pa.string()), '''translation''': pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"""Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(self.languages)})."""
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('''string''')),
            "translation": Sequence(Value('''string''')),
        }
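# A brief usage sketch, added here and not part of the original file: these
# feature types are normally consumed through the public `datasets` API, where
# the classes above are exported as Translation and TranslationVariableLanguages.
from datasets import Dataset, Features, Translation

features = Features({"translation": Translation(languages=["en", "fr"])})
ds = Dataset.from_dict(
    {"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features
)
print(ds[0]["translation"]["fr"])  # "le chat"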
43
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase_ ( _snake_case ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" ) SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" ) SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" ) SCREAMING_SNAKE_CASE__ : Tuple = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE__ : Optional[Any] = value return upgrade @torch.no_grad() def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ): if config_path is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case ) else: SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig() SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval() SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case ) if os.path.exists(_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ) SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case ) hf_model.load_state_dict(_snake_case ) SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict() SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case ) SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case ) assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = 
argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
25
0
"""simple docstring""" # Algorithm for the pigeonhole sorting def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Dict: _lowerCAmelCase : Any = min(_lowerCamelCase ) # min() finds the minimum value _lowerCAmelCase : Dict = max(_lowerCamelCase ) # max() finds the maximum value _lowerCAmelCase : Tuple = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size _lowerCAmelCase : Optional[Any] = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(_lowerCamelCase ,_lowerCamelCase ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. _lowerCAmelCase : Tuple = 0 for count in range(_lowerCamelCase ): while holes[count] > 0: holes[count] -= 1 _lowerCAmelCase : Optional[int] = count + min_val i += 1 def SCREAMING_SNAKE_CASE ( ) -> List[Any]: _lowerCAmelCase : List[str] = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(_lowerCamelCase ) print("""Sorted order is:""" ,""" """.join(_lowerCamelCase ) ) if __name__ == "__main__": main()
44
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip' def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,) if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) ) else: SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0] print(our_output.shape ,their_output.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) UpperCAmelCase__ : Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
25
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name lowercase_ = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : Union[PIL.Image.Image, np.ndarray] class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , _a , _a , _a , _a , _a , ): super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ): if latents is None: __a = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) __a = latents.to(_a ) __a = latents * scheduler.init_noise_sigma return latents def __UpperCAmelCase ( self , _a=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) __a = torch.device(f'''cuda:{gpu_id}''' ) __a = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def __UpperCAmelCase ( self ): if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __UpperCAmelCase ( self , _a , _a , _a , _a , ): if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): __a = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): __a = self.image_processor(_a , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) __a = image.to(dtype=self.image_encoder.dtype , device=_a ) __a = self.image_encoder(_a )['''last_hidden_state'''] __a = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 __a = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: __a = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward 
passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __a = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ): if isinstance(_a , PIL.Image.Image ): __a = 1 elif isinstance(_a , torch.Tensor ): __a = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): __a = len(_a ) else: raise ValueError( f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) __a = self._execution_device __a = batch_size * num_images_per_prompt __a = guidance_scale > 1.0 __a = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) __a = self.scheduler.timesteps __a = self.prior.config.num_embeddings __a = self.prior.config.embedding_dim __a = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim __a = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance __a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __a = self.scheduler.scale_model_input(_a , _a ) __a = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance __a , __a = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: __a , __a = noise_pred.chunk(2 ) __a = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) __a = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) __a = [] for i, latent in enumerate(_a ): print() __a = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) __a = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) __a = images.cpu().numpy() if output_type == "pil": __a = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
45
"""simple docstring""" UpperCAmelCase__ : List[str] = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] UpperCAmelCase__ : Tuple = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] UpperCAmelCase__ : Union[str, Any] = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] UpperCAmelCase__ : str = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] UpperCAmelCase__ : str = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
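# A short sketch, added for illustration and not part of the original file, of
# how a hand-picked timestep table like the ones above can be thinned out to
# trade quality for speed. Whether a given diffusers scheduler accepts an
# explicit timestep list depends on its version, so only the plain-Python
# selection step is shown; `subsample` is a hypothetical helper.
full_schedule = [999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311]


def subsample(schedule, keep_every):
    picked = schedule[::keep_every]
    if picked[-1] != schedule[-1]:  # keep the final step so the walk still ends near t=0
        picked.append(schedule[-1])
    return picked


print(subsample(full_schedule, 3))  # [999, 600, 400, 355, 311]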
25
0
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowercase ( _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = RoCBertTokenizer _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = filter_non_english def _snake_case ( self ) -> Tuple: super().setUp() lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] lowerCAmelCase = {} lowerCAmelCase = {} for i, value in enumerate(lowercase ): lowerCAmelCase = i lowerCAmelCase = i lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] ) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer: json.dump(lowercase , lowercase , ensure_ascii=lowercase ) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer: json.dump(lowercase , lowercase , ensure_ascii=lowercase ) def _snake_case ( self ) -> Tuple: lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) lowerCAmelCase = tokenizer.tokenize("""你好[SEP]你是谁""" ) self.assertListEqual(lowercase , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowercase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowercase ) , [5, 6, 2, 5, 7, 8] ) def _snake_case ( self ) -> List[str]: lowerCAmelCase = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def _snake_case ( self ) -> Dict: lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _snake_case ( self ) -> Any: lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _snake_case ( self ) -> Tuple: lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _snake_case ( self ) -> Dict: lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _snake_case ( self ) -> List[str]: lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _snake_case ( self ) -> Tuple: lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=lowercase , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def _snake_case ( self ) -> Optional[int]: lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowerCAmelCase = {} for i, token in enumerate(lowercase ): lowerCAmelCase = i lowerCAmelCase = RoCBertWordpieceTokenizer(vocab=lowercase , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def _snake_case ( self ) -> int: self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def _snake_case ( self ) -> int: self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def _snake_case ( self ) -> int: self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def _snake_case ( self ) -> Tuple: lowerCAmelCase = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowercase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) if self.test_rust_tokenizer: lowerCAmelCase = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(lowercase ) for t in ["""Test""", """\xad""", """test"""]] , 
[["""[UNK]"""], [], ["""[UNK]"""]] ) def _snake_case ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) lowerCAmelCase = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' lowerCAmelCase = tokenizer_r.encode_plus( lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase , ) lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(lowercase , """do_lower_case""" ) else False lowerCAmelCase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def _snake_case ( self ) -> List[Any]: lowerCAmelCase = ["""的""", """人""", """有"""] lowerCAmelCase = """""".join(lowercase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowerCAmelCase = True lowerCAmelCase = self.tokenizer_class.from_pretrained(lowercase , **lowercase ) lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) lowerCAmelCase = tokenizer_p.encode(lowercase , add_special_tokens=lowercase ) lowerCAmelCase = tokenizer_r.encode(lowercase , add_special_tokens=lowercase ) lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(lowercase ) lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(lowercase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , lowercase ) lowerCAmelCase = False lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) lowerCAmelCase = self.tokenizer_class.from_pretrained(lowercase , **lowercase ) lowerCAmelCase = tokenizer_r.encode(lowercase , add_special_tokens=lowercase ) lowerCAmelCase = tokenizer_p.encode(lowercase , add_special_tokens=lowercase ) lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(lowercase ) lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(lowercase ) # it is expected that only the first Chinese character is not preceded by "##". 
lowerCAmelCase = [ f'##{token}' if idx != 0 else token for idx, token in enumerate(lowercase ) ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , lowercase ) @slow def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) lowerCAmelCase = tokenizer.encode("""你好""" , add_special_tokens=lowercase ) lowerCAmelCase = tokenizer.encode("""你是谁""" , add_special_tokens=lowercase ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def _snake_case ( self ) -> Tuple: lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): lowerCAmelCase = """你好,你是谁""" lowerCAmelCase = tokenizer.tokenize(lowercase ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowercase ) lowerCAmelCase = tokenizer.convert_tokens_to_shape_ids(lowercase ) lowerCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(lowercase ) lowerCAmelCase = tokenizer.prepare_for_model( lowercase , lowercase , lowercase , add_special_tokens=lowercase ) lowerCAmelCase = tokenizer.encode_plus(lowercase , add_special_tokens=lowercase ) self.assertEqual(lowercase , lowercase )
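The three parallel id sequences checked above are RoCBert's core idea: every token is looked up in a token vocabulary, a shape vocabulary, and a pronunciation vocabulary. A minimal sketch with plain dicts; the toy vocabulary mirrors the test fixture, and no transformers install is needed:

tokens = ["你", "好", "[SEP]", "你", "是", "谁"]
vocab = {t: i for i, t in enumerate(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁"])}
shape_vocab = dict(vocab)  # the test fixture reuses one table for all three lookups
pronunciation_vocab = dict(vocab)

input_ids = [vocab.get(t, vocab["[UNK]"]) for t in tokens]
shape_ids = [shape_vocab.get(t, shape_vocab["[UNK]"]) for t in tokens]
pronunciation_ids = [pronunciation_vocab.get(t, pronunciation_vocab["[UNK]"]) for t in tokens]
assert input_ids == shape_ids == pronunciation_ids == [5, 6, 2, 5, 7, 8]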
46
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase__ : List[str] = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCAmelCase__ : List[Any] = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = SavedModel() SCREAMING_SNAKE_CASE__ : Dict = [] with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f: SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(_snake_case )] ) with open(_snake_case ,"""rb""" ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE__ : List[str] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(_snake_case ) if strict and len(_snake_case ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(_snake_case ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*_snake_case ,sep="""\n""" ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) UpperCAmelCase__ : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
25
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
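The _LazyModule this init hands off to defers the heavy submodule imports until an attribute is actually touched. A simplified stand-in showing the mechanism (this is a sketch, not transformers' actual implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only runs when the attribute is missing, i.e. the first access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the submodule import happens only once
        return value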
47
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) UpperCAmelCase__ : List[Any] = logging.getLogger() def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case ) Path(_snake_case ).open("""w""" ).writelines(_snake_case ) UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random' UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart' UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowerCAmelCase_ (a__ ): """simple docstring""" def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""] _dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : Optional[Any] = F''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): run_generate() assert Path(SCREAMING_SNAKE_CASE__ ).exists() # os.remove(Path(output_file_name)) def __magic_name__ (self ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : Any = { """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""], """de""": [ """Maschinelles Lernen ist großartig, oder?""", """Ich esse gerne Bananen""", """Morgen ist wieder ein toller Tag!""", ], } SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] ) SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : List[Any] = F''' run_eval_search.py {model} 
{str(SCREAMING_SNAKE_CASE__ )} {str(SCREAMING_SNAKE_CASE__ )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] ) with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): with CaptureStdout() as cs: run_search() SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""] SCREAMING_SNAKE_CASE__ : Any = ["""Info"""] if "translation" in task: expected_strings.append("""bleu""" ) else: expected_strings.extend(SCREAMING_SNAKE_CASE__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(SCREAMING_SNAKE_CASE__ ).exists() os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
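Both tests above drive run_eval and run_eval_search through their CLI entry points by patching sys.argv instead of spawning a subprocess. The same trick in isolation, with a hypothetical main():

import sys
from unittest.mock import patch


def main():
    print(f"called with: {sys.argv[1:]}")


testargs = ["run_eval.py", "--num_beams", "2"]
with patch.object(sys, "argv", testargs):
    main()  # prints: called with: ['--num_beams', '2']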
25
0
import argparse import os import re SCREAMING_SNAKE_CASE__ : Any = 'src/transformers' # Pattern that looks at the indentation in a line. SCREAMING_SNAKE_CASE__ : Any = re.compile(r'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. SCREAMING_SNAKE_CASE__ : List[str] = re.compile(r'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. SCREAMING_SNAKE_CASE__ : str = re.compile(r'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. SCREAMING_SNAKE_CASE__ : int = re.compile(r'\[([^\]]+)\]') def A ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : List[str] = _re_indent.search(_SCREAMING_SNAKE_CASE ) return "" if search is None else search.groups()[0] def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="" ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ) -> Tuple: lowerCamelCase : Optional[Any] = 0 lowerCamelCase : Any = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(_SCREAMING_SNAKE_CASE ): index += 1 lowerCamelCase : List[Any] = ["\n".join(lines[:index] )] else: lowerCamelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowerCamelCase : Union[str, Any] = [lines[index]] index += 1 while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) ) if index < len(_SCREAMING_SNAKE_CASE ) - 1: lowerCamelCase : Any = [lines[index + 1]] index += 1 else: lowerCamelCase : Any = [] else: blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Tuple = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_SCREAMING_SNAKE_CASE ) > 0: blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ): blocks.append("\n".join(lines[index:] ) ) return blocks def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: def _inner(_SCREAMING_SNAKE_CASE ): return key(_SCREAMING_SNAKE_CASE ).lower().replace("_" ,"" ) return _inner def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: # If no key is provided, we use a noop. def noop(_SCREAMING_SNAKE_CASE ): return x if key is None: lowerCamelCase : List[str] = noop # Constants are all uppercase, they go first. lowerCamelCase : int = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowerCamelCase : Union[str, Any] = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE )[0].isupper() and not key(_SCREAMING_SNAKE_CASE ).isupper()] # Functions begin with a lowercase, they go last. 
lowerCamelCase : Optional[int] = [obj for obj in objects if not key(_SCREAMING_SNAKE_CASE )[0].isupper()] lowerCamelCase : Union[str, Any] = ignore_underscore(_SCREAMING_SNAKE_CASE ) return sorted(_SCREAMING_SNAKE_CASE ,key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE ,key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE ,key=_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ) -> List[Any]: # This inner function sort imports between [ ]. def _replace(_SCREAMING_SNAKE_CASE ): lowerCamelCase : Optional[Any] = match.groups()[0] if "," not in imports: return f'''[{imports}]''' lowerCamelCase : int = [part.strip().replace("\"" ,"" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCamelCase : Optional[int] = keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) + "]" lowerCamelCase : List[str] = import_statement.split("\n" ) if len(_SCREAMING_SNAKE_CASE ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowerCamelCase : List[str] = 2 if lines[1].strip() == "[" else 1 lowerCamelCase : Union[str, Any] = [(i, _re_strip_line.search(_SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowerCamelCase : str = sort_objects(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ) lowerCamelCase : str = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_SCREAMING_SNAKE_CASE ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowerCamelCase : Optional[int] = _re_bracket_content.sub(_replace ,lines[1] ) else: lowerCamelCase : List[str] = [part.strip().replace("\"" ,"" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCamelCase : Any = keys[:-1] lowerCamelCase : Any = get_indent(lines[1] ) + ", ".join([f'''"{k}"''' for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) return "\n".join(_SCREAMING_SNAKE_CASE ) else: # Finally we have to deal with imports fitting on one line lowerCamelCase : Dict = _re_bracket_content.sub(_replace ,_SCREAMING_SNAKE_CASE ) return import_statement def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: with open(_SCREAMING_SNAKE_CASE ,encoding="utf-8" ) as f: lowerCamelCase : Union[str, Any] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowerCamelCase : Optional[int] = split_code_in_indented_blocks( _SCREAMING_SNAKE_CASE ,start_prompt="_import_structure = {" ,end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 ,len(_SCREAMING_SNAKE_CASE ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowerCamelCase : int = main_blocks[block_idx] lowerCamelCase : Union[str, Any] = block.split("\n" ) # Get to the start of the imports. 
lowerCamelCase : List[str] = 0 while line_idx < len(_SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowerCamelCase : int = len(_SCREAMING_SNAKE_CASE ) else: line_idx += 1 if line_idx >= len(_SCREAMING_SNAKE_CASE ): continue # Ignore beginning and last line: they don't contain anything. lowerCamelCase : Optional[int] = "\n".join(block_lines[line_idx:-1] ) lowerCamelCase : Optional[int] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowerCamelCase : Dict = split_code_in_indented_blocks(_SCREAMING_SNAKE_CASE ,indent_level=_SCREAMING_SNAKE_CASE ) # We have two categories of import key: list or _import_structure[key].append/extend lowerCamelCase : Union[str, Any] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowerCamelCase : Optional[Any] = [(pattern.search(_SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(_SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowerCamelCase : Optional[Any] = [(i, key) for i, key in enumerate(_SCREAMING_SNAKE_CASE ) if key is not None] lowerCamelCase : str = [x[0] for x in sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowerCamelCase : List[Any] = 0 lowerCamelCase : Optional[Any] = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: lowerCamelCase : Union[str, Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_SCREAMING_SNAKE_CASE ) count += 1 # And we put our main block back together with its first and last line. lowerCamelCase : Dict = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_SCREAMING_SNAKE_CASE ): if check_only: return True else: print(f'''Overwriting {file}.''' ) with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write("\n".join(_SCREAMING_SNAKE_CASE ) ) def A ( _SCREAMING_SNAKE_CASE=True ) -> Any: lowerCamelCase : Optional[Any] = [] for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ): if "__init__.py" in files: lowerCamelCase : Any = sort_imports(os.path.join(_SCREAMING_SNAKE_CASE ,"__init__.py" ) ,check_only=_SCREAMING_SNAKE_CASE ) if result: lowerCamelCase : Optional[Any] = [os.path.join(_SCREAMING_SNAKE_CASE ,"__init__.py" )] if len(_SCREAMING_SNAKE_CASE ) > 0: raise ValueError(f'''Would overwrite {len(_SCREAMING_SNAKE_CASE )} files, run `make style`.''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
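The ignore_underscore key used throughout the sorter makes leading underscores and letter case irrelevant to ordering, so names like _LazyModule sort next to their public counterparts. A quick standalone check of that behavior:

def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


names = ["__version__", "AutoModel", "_LazyModule", "logging"]
print(sorted(names, key=ignore_underscore(str)))
# -> ['AutoModel', '_LazyModule', 'logging', '__version__']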
48
"""simple docstring""" UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}] UpperCAmelCase__ : Optional[int] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
25
0
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _A ( __UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = (EulerDiscreteScheduler,) UpperCamelCase__ : List[str] = 10 def _lowerCamelCase ( self : Tuple , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = { '''num_train_timesteps''': 1_100, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**__SCREAMING_SNAKE_CASE) return config def _lowerCamelCase ( self : int): '''simple docstring''' for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) scheduler.set_timesteps(self.num_inference_steps) __a = torch.manual_seed(0) __a = self.dummy_model() __a = self.dummy_sample_deter * scheduler.init_noise_sigma __a = sample.to(__SCREAMING_SNAKE_CASE) for i, t in enumerate(scheduler.timesteps): __a = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE) __a = output.prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 10.08_07) < 1E-2 assert abs(result_mean.item() - 0.01_31) < 1E-3 def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(prediction_type='''v_prediction''') __a = scheduler_class(**__SCREAMING_SNAKE_CASE) scheduler.set_timesteps(self.num_inference_steps) __a = torch.manual_seed(0) __a = self.dummy_model() __a = self.dummy_sample_deter * scheduler.init_noise_sigma __a = sample.to(__SCREAMING_SNAKE_CASE) for i, t in enumerate(scheduler.timesteps): __a = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE) __a = output.prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 0.00_02) < 1E-2 assert abs(result_mean.item() - 2.2676E-06) < 1E-3 def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) scheduler.set_timesteps(self.num_inference_steps , device=__SCREAMING_SNAKE_CASE) __a = torch.manual_seed(0) __a = self.dummy_model() __a = self.dummy_sample_deter * 
scheduler.init_noise_sigma.cpu() __a = sample.to(__SCREAMING_SNAKE_CASE) for t in scheduler.timesteps: __a = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE) __a = output.prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 10.08_07) < 1E-2 assert abs(result_mean.item() - 0.01_31) < 1E-3 def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE , use_karras_sigmas=__SCREAMING_SNAKE_CASE) scheduler.set_timesteps(self.num_inference_steps , device=__SCREAMING_SNAKE_CASE) __a = torch.manual_seed(0) __a = self.dummy_model() __a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __a = sample.to(__SCREAMING_SNAKE_CASE) for t in scheduler.timesteps: __a = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE) __a = output.prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19) < 1E-2 assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63) < 1E-3
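Outside the test harness, the scheduler calls exercised above (set_timesteps, scale_model_input, step) form the standard diffusers sampling loop. A minimal sketch, assuming diffusers is installed and using a zero-output stand-in instead of a trained UNet:

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)  # stand-in for a trained model's noise prediction

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample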
49
"""simple docstring""" def lowercase_ ( _snake_case ): if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_snake_case ,_snake_case ): raise TypeError("""Input value must be a 'int' type""" ) return bin(_snake_case ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
25
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
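Once exposed through this structure, the XL classes load like any other transformers model. A typical use, assuming the facebook/xlm-roberta-xl Hub checkpoint (a multi-billion-parameter model, so the download is large):

from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-roberta-xl")
model = AutoModel.from_pretrained("facebook/xlm-roberta-xl")
inputs = tokenizer("Hello world", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)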
50
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCAmelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a__ ) class lowerCAmelCase_ (a__ ): """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) requires_backends(self , """vision""" ) self.check_model_type(SCREAMING_SNAKE_CASE__ ) def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" return {}, {}, {} def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = image.size SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework ) return model_inputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ ) return model_outputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy() SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" ) SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = {} SCREAMING_SNAKE_CASE__ : Any = predicted_depth SCREAMING_SNAKE_CASE__ : Dict = depth return output_dict
25
0
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean snake_case_ : str = 0 snake_case_ : Union[str, Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] snake_case_ : str = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right snake_case_ : List[Any] = tuple[int, int] class __snake_case : def __init__( self : Any , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : Node | None , ): """simple docstring""" UpperCAmelCase_ = pos_x UpperCAmelCase_ = pos_y UpperCAmelCase_ = (pos_y, pos_x) UpperCAmelCase_ = goal_x UpperCAmelCase_ = goal_y UpperCAmelCase_ = g_cost UpperCAmelCase_ = parent UpperCAmelCase_ = self.calculate_heuristic() UpperCAmelCase_ = self.g_cost + self.h_cost def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.pos_x - self.goal_x UpperCAmelCase_ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(_snake_case) + abs(_snake_case) else: return sqrt(dy**2 + dx**2) def __lt__( self : Union[str, Any] , _snake_case : Node): """simple docstring""" return self.f_cost < other.f_cost class __snake_case : def __init__( self : str , _snake_case : TPosition , _snake_case : TPosition): """simple docstring""" UpperCAmelCase_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _snake_case) UpperCAmelCase_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , _snake_case) UpperCAmelCase_ = [self.start] UpperCAmelCase_ = [] UpperCAmelCase_ = False def lowerCamelCase ( self : Optional[int]): """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ = self.open_nodes.pop(0) if current_node.pos == self.target.pos: return self.retrace_path(_snake_case) self.closed_nodes.append(_snake_case) UpperCAmelCase_ = self.get_successors(_snake_case) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_snake_case) else: # retrieve the best current path UpperCAmelCase_ = self.open_nodes.pop(self.open_nodes.index(_snake_case)) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_snake_case) else: self.open_nodes.append(_snake_case) return [self.start.pos] def lowerCamelCase ( self : Tuple , _snake_case : Node): """simple docstring""" UpperCAmelCase_ = [] for action in delta: UpperCAmelCase_ = parent.pos_x + action[1] UpperCAmelCase_ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(_snake_case) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _snake_case , _snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _snake_case , )) return successors def lowerCamelCase ( self : Any , _snake_case : Node | None): """simple docstring""" UpperCAmelCase_ = node UpperCAmelCase_ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) UpperCAmelCase_ = current_node.parent path.reverse() return path class __snake_case : def __init__( self : Any , _snake_case : TPosition , _snake_case : TPosition): """simple docstring""" UpperCAmelCase_ = AStar(_snake_case , _snake_case) UpperCAmelCase_ = AStar(_snake_case , _snake_case) UpperCAmelCase_ = False def lowerCamelCase ( self : List[Any]): """simple docstring""" while self.fwd_astar.open_nodes 
or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase_ = self.fwd_astar.open_nodes.pop(0) UpperCAmelCase_ = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( _snake_case , _snake_case) self.fwd_astar.closed_nodes.append(_snake_case) self.bwd_astar.closed_nodes.append(_snake_case) UpperCAmelCase_ = current_bwd_node UpperCAmelCase_ = current_fwd_node UpperCAmelCase_ = { self.fwd_astar: self.fwd_astar.get_successors(_snake_case), self.bwd_astar: self.bwd_astar.get_successors(_snake_case), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(_snake_case) else: # retrieve the best current path UpperCAmelCase_ = astar.open_nodes.pop( astar.open_nodes.index(_snake_case)) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(_snake_case) else: astar.open_nodes.append(_snake_case) return [self.fwd_astar.start.pos] def lowerCamelCase ( self : int , _snake_case : Node , _snake_case : Node): """simple docstring""" UpperCAmelCase_ = self.fwd_astar.retrace_path(_snake_case) UpperCAmelCase_ = self.bwd_astar.retrace_path(_snake_case) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] snake_case_ : Any = (0, 0) snake_case_ : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) snake_case_ : str = time.time() snake_case_ : List[str] = AStar(init, goal) snake_case_ : Optional[int] = a_star.search() snake_case_ : Optional[Any] = time.time() - start_time print(f"AStar execution time = {end_time:f} seconds") snake_case_ : int = time.time() snake_case_ : Dict = BidirectionalAStar(init, goal) snake_case_ : str = time.time() - bd_start_time print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
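The HEURISTIC flag at the top switches between the two grid heuristics computed in calculate_heuristic. Worked by hand for the demo's start/goal pair, (0, 0) to (6, 6):

from math import sqrt

dx, dy = 6 - 0, 6 - 0  # coordinate deltas between goal and start
print(abs(dx) + abs(dy))    # 12   -> Manhattan distance (HEURISTIC == 1)
print(sqrt(dx**2 + dy**2))  # 8.49 -> Euclidean distance (HEURISTIC == 0)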
51
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = IFPipeline __UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" return self._get_dummy_components() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]: """simple docstring""" if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def __magic_name__ (self ) -> List[str]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_local() def __magic_name__ (self ) -> Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ (self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : int = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) 
).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowercase_ ( ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
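The _start_torch_memory_measurement helper at the bottom resets CUDA's peak-memory counters so each stage can assert its own memory budget. The measurement pattern in isolation (needs a CUDA device; the workload here is a stand-in):

import torch

torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()

x = torch.randn(1024, 1024, device="cuda")  # stand-in for the workload under test
peak = torch.cuda.max_memory_allocated()
assert peak < 13 * 10**9, f"peak memory {peak} bytes over budget"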
25
0
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __lowerCamelCase : str = random.Random() if is_torch_available(): import torch def A_ ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: if rng is None: UpperCamelCase : Optional[int] = global_rng UpperCamelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ): '''simple docstring''' UpperCamelCase : Tuple = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : List[Any] = min_seq_length UpperCamelCase : List[str] = max_seq_length UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Union[str, Any] = feature_size UpperCamelCase : List[str] = padding_value UpperCamelCase : Optional[Any] = sampling_rate UpperCamelCase : List[str] = return_attention_mask UpperCamelCase : List[Any] = do_normalize def __UpperCamelCase( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __UpperCamelCase( self , A_=False , A_=False ): '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Dict = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = ASTFeatureExtractor def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = ASTFeatureExtractionTester(self ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : int = np.asarray(A_ ) UpperCamelCase : Any = feat_extract(A_ , return_tensors="np" ).input_values UpperCamelCase : List[str] = feat_extract(A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : int = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : str = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __UpperCamelCase( self , A_ ): '''simple docstring''' from datasets import load_dataset UpperCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCamelCase : Any = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on UpperCamelCase : List[Any] = self._load_datasamples(1 ) UpperCamelCase : Tuple = ASTFeatureExtractor() UpperCamelCase : str = feature_extractor(A_ , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
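The extractor under test converts raw 16 kHz audio into fixed-size (1024, 128) log-mel inputs, the shape the integration test asserts. Minimal usage with a synthetic waveform, assuming transformers and torch are installed:

import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.random.rand(16000).astype(np.float32)  # one second of synthetic 16 kHz audio
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([1, 1024, 128])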
52
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) SCREAMING_SNAKE_CASE__ : int = Accelerator() SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ ) try: pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
25
0
'''simple docstring'''
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
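DataCollatorWithPadding is the most commonly used of the collators re-exported above: it pads a list of tokenized examples to a common length at batch time. Minimal usage, assuming the bert-base-uncased tokenizer as an example checkpoint:

from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="pt")
features = [tokenizer("short"), tokenizer("a somewhat longer sentence")]
batch = collator(features)
print(batch["input_ids"].shape)  # both rows padded to the longer sequence length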
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) SCREAMING_SNAKE_CASE__ : int = [] # custom device map if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_snake_case ) # compatibility with peft SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) # convert param to the right dtype SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" ) SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_snake_case ): param.to(_snake_case ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers( _snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map( _snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE__ : Tuple = True SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,) return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ): if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_snake_case ,_snake_case ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE__ : int = get_balanced_memory( _snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,) SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case ) if isinstance(_snake_case ,_snake_case ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE__ : Optional[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ): if modules_to_not_convert is None: SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,): SCREAMING_SNAKE_CASE__ : Tuple = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE__ : Any = [] current_key_name.append(_snake_case ) if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE__ : List[str] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt( module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit( module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) SCREAMING_SNAKE_CASE__ : str = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data bnb_module.requires_grad_(_snake_case ) setattr(_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowercase_ ( _snake_case ): # Create a copy of the model with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case ) # For compatibility with Accelerate < 0.18 if isinstance(_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] ) SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE__ : Optional[int] = False if hasattr(_snake_case 
,"""base_model_prefix""" ): SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" ) filtered_module_names.append(_snake_case ) return filtered_module_names def lowercase_ ( _snake_case ): for m in model.modules(): if isinstance(_snake_case ,bnb.nn.Linearabit ): return True return False def lowercase_ ( _snake_case ): return next(parameter.parameters() ).device def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case ) SCREAMING_SNAKE_CASE__ : str = param_name SCREAMING_SNAKE_CASE__ : Dict = model if "." in tensor_name: SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" ) for split in splits[:-1]: SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1] # offload weights SCREAMING_SNAKE_CASE__ : List[Any] = False offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case ) if hasattr(module._parameters[tensor_name] ,"""SCB""" ): offload_weight( module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,) else: offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case ) offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ) set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging a__ : Union[str, Any] = logging.get_logger(__name__) def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = os.getenv("SM_HP_MP_PARAMETERS" , "{}" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. __SCREAMING_SNAKE_CASE = json.loads(lowerCAmelCase_ ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. __SCREAMING_SNAKE_CASE = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". __SCREAMING_SNAKE_CASE = json.loads(lowerCAmelCase_ ) if not mpi_options.get("sagemaker_mpi_enabled" , lowerCAmelCase_ ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = field( default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , ) def UpperCAmelCase_ ( self : List[str] ) -> Any: super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead." , UpperCAmelCase__ , ) @cached_property def UpperCAmelCase_ ( self : List[str] ) -> "torch.device": logger.info("PyTorch: setting up devices" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if self.no_cuda: __SCREAMING_SNAKE_CASE = torch.device("cpu" ) __SCREAMING_SNAKE_CASE = 0 elif is_sagemaker_model_parallel_available(): __SCREAMING_SNAKE_CASE = smp.local_rank() __SCREAMING_SNAKE_CASE = torch.device("cuda" , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta ) __SCREAMING_SNAKE_CASE = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) ) __SCREAMING_SNAKE_CASE = torch.device("cuda" , self.local_rank ) __SCREAMING_SNAKE_CASE = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 __SCREAMING_SNAKE_CASE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. __SCREAMING_SNAKE_CASE = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta ) __SCREAMING_SNAKE_CASE = torch.device("cuda" , self.local_rank ) __SCREAMING_SNAKE_CASE = 1 if device.type == "cuda": torch.cuda.set_device(UpperCAmelCase__ ) return device @property def UpperCAmelCase_ ( self : Dict ) -> Any: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]: return not is_sagemaker_model_parallel_available() @property def UpperCAmelCase_ ( self : Tuple ) -> int: return False
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )): raise ValueError("""longest_common_substring() takes two strings for inputs""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case ) SCREAMING_SNAKE_CASE__ : int = len(_snake_case ) SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 for i in range(1 ,texta_length + 1 ): for j in range(1 ,texta_length + 1 ): if texta[i - 1] == texta[j - 1]: SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: SCREAMING_SNAKE_CASE__ : List[Any] = i SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch a_ : List[Any] = True except ImportError: a_ : Tuple = False try: from torch.hub import _get_torch_home a_ : Optional[int] = _get_torch_home() except ImportError: a_ : List[Any] = os.path.expanduser( os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch""")) ) a_ : Dict = os.path.join(torch_cache_home, """transformers""") a_ : Dict = """https://cdn.huggingface.co""" a_ : int = """https://s3.amazonaws.com/models.huggingface.co/bert""" a_ : Optional[Any] = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1]) a_ : Union[str, Any] = os.path.join(PATH, """config.yaml""") a_ : Tuple = os.path.join(PATH, """attributes.txt""") a_ : int = os.path.join(PATH, """objects.txt""") a_ : int = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path) a_ : Optional[Any] = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE) a_ : Tuple = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE) a_ : Dict = """pytorch_model.bin""" a_ : str = """config.yaml""" def __snake_case ( UpperCAmelCase_ : Union[str, Any]=OBJECTS , UpperCAmelCase_ : int=ATTRIBUTES ): lowerCamelCase_ = [] with open(UpperCAmelCase_ ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) lowerCamelCase_ = [] with open(UpperCAmelCase_ ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def __snake_case ( UpperCAmelCase_ : Dict ): lowerCamelCase_ = OrderedDict() with open(UpperCAmelCase_ , "rb" ) as f: lowerCamelCase_ = pkl.load(UpperCAmelCase_ )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): lowerCamelCase_ = ckp.pop(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , np.ndarray ): lowerCamelCase_ = torch.tensor(UpperCAmelCase_ ) else: assert isinstance(UpperCAmelCase_ , torch.tensor ), type(UpperCAmelCase_ ) lowerCamelCase_ = v return r class snake_case : """simple docstring""" _lowerCamelCase = {} def __init__( self , UpperCamelCase , UpperCamelCase = "root" , UpperCamelCase=0 ): """simple docstring""" lowerCamelCase_ = name lowerCamelCase_ = level lowerCamelCase_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() lowerCamelCase_ = copy.deepcopy(UpperCamelCase ) lowerCamelCase_ = copy.deepcopy(UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = Config(UpperCamelCase , name=UpperCamelCase , level=level + 1 ) lowerCamelCase_ = v setattr(self , UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = d def __repr__( self ): """simple docstring""" return str(list((self._pointer.keys()) ) ) def __setattr__( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = val lowerCamelCase_ = val lowerCamelCase_ = key.split("." 
) lowerCamelCase_ = len(UpperCamelCase ) - 1 lowerCamelCase_ = self._pointer if len(UpperCamelCase ) > 1: for i, l in enumerate(UpperCamelCase ): if hasattr(self , UpperCamelCase ) and isinstance(getattr(self , UpperCamelCase ) , UpperCamelCase ): setattr(getattr(self , UpperCamelCase ) , ".".join(levels[i:] ) , UpperCamelCase ) if l == last_level: lowerCamelCase_ = val else: lowerCamelCase_ = pointer[l] def snake_case ( self ): """simple docstring""" return self._pointer def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" with open(f'''{file_name}''' , "w" ) as stream: dump(UpperCamelCase , UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" with open(f'''{file_name}''' , "w" ) as stream: json.dump(UpperCamelCase , UpperCamelCase ) @staticmethod def snake_case ( UpperCamelCase ): """simple docstring""" with open(UpperCamelCase ) as stream: lowerCamelCase_ = load(UpperCamelCase , Loader=UpperCamelCase ) return data def __str__( self ): """simple docstring""" lowerCamelCase_ = " " if self._name != "root": lowerCamelCase_ = f'''{t * (self._level-1)}{self._name}:\n''' else: lowerCamelCase_ = "" lowerCamelCase_ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(UpperCamelCase , UpperCamelCase ): r += f'''{t * (self._level)}{v}\n''' self._level += 1 else: r += f'''{t * (self._level)}{k}: {v} ({type(UpperCamelCase ).__name__})\n''' lowerCamelCase_ = level return r[:-1] @classmethod def snake_case ( cls , UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = cls.get_config_dict(UpperCamelCase , **UpperCamelCase ) return cls(UpperCamelCase ) @classmethod def snake_case ( cls , UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = kwargs.pop("cache_dir" , UpperCamelCase ) lowerCamelCase_ = kwargs.pop("force_download" , UpperCamelCase ) lowerCamelCase_ = kwargs.pop("resume_download" , UpperCamelCase ) lowerCamelCase_ = kwargs.pop("proxies" , UpperCamelCase ) lowerCamelCase_ = kwargs.pop("local_files_only" , UpperCamelCase ) if os.path.isdir(UpperCamelCase ): lowerCamelCase_ = os.path.join(UpperCamelCase , UpperCamelCase ) elif os.path.isfile(UpperCamelCase ) or is_remote_url(UpperCamelCase ): lowerCamelCase_ = pretrained_model_name_or_path else: lowerCamelCase_ = hf_bucket_url(UpperCamelCase , filename=UpperCamelCase , use_cdn=UpperCamelCase ) try: # Load from URL or cache if already cached lowerCamelCase_ = cached_path( UpperCamelCase , cache_dir=UpperCamelCase , force_download=UpperCamelCase , proxies=UpperCamelCase , resume_download=UpperCamelCase , local_files_only=UpperCamelCase , ) # Load config dict if resolved_config_file is None: raise EnvironmentError lowerCamelCase_ = Config.load_yaml(UpperCamelCase ) except EnvironmentError: lowerCamelCase_ = "Can't load config for" raise EnvironmentError(UpperCamelCase ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(UpperCamelCase ), kwargs def __snake_case ( UpperCAmelCase_ : Union[str, Any] ): lowerCamelCase_ = torch.load("dump.pt" , map_location=in_tensor.device ) lowerCamelCase_ = in_tensor.numpy() lowerCamelCase_ = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , rtol=0.01 , atol=0.1 ), ( F'''{sum([1 for x in np.isclose(UpperCAmelCase_ , UpperCAmelCase_ , rtol=0.01 , atol=0.1 
).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def __snake_case ( UpperCAmelCase_ : Optional[int] ): lowerCamelCase_ = urlparse(UpperCAmelCase_ ) return parsed.scheme in ("http", "https") def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any]=True ): lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX lowerCamelCase_ = "/" not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Tuple=None , ): lowerCamelCase_ = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): ua += "; " + "; ".join("{}/{}".format(UpperCAmelCase_ , UpperCAmelCase_ ) for k, v in user_agent.items() ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): ua += "; " + user_agent lowerCamelCase_ = {"user-agent": ua} if resume_size > 0: lowerCamelCase_ = "bytes=%d-" % (resume_size,) lowerCamelCase_ = requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ , proxies=UpperCAmelCase_ , headers=UpperCAmelCase_ ) if response.status_code == 416: # Range not satisfiable return lowerCamelCase_ = response.headers.get("Content-Length" ) lowerCamelCase_ = resume_size + int(UpperCAmelCase_ ) if content_length is not None else None lowerCamelCase_ = tqdm( unit="B" , unit_scale=UpperCAmelCase_ , total=UpperCAmelCase_ , initial=UpperCAmelCase_ , desc="Downloading" , ) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(UpperCAmelCase_ ) ) temp_file.write(UpperCAmelCase_ ) progress.close() def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Optional[int]=False , ): if cache_dir is None: lowerCamelCase_ = TRANSFORMERS_CACHE if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase_ = str(UpperCAmelCase_ ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCamelCase_ = None if not local_files_only: try: lowerCamelCase_ = requests.head(UpperCAmelCase_ , allow_redirects=UpperCAmelCase_ , proxies=UpperCAmelCase_ , timeout=UpperCAmelCase_ ) if response.status_code == 200: lowerCamelCase_ = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass lowerCamelCase_ = url_to_filename(UpperCAmelCase_ , UpperCAmelCase_ ) # get cache path to put the file lowerCamelCase_ = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. 
# try to get the last downloaded one if etag is None: if os.path.exists(UpperCAmelCase_ ): return cache_path else: lowerCamelCase_ = [ file for file in fnmatch.filter(os.listdir(UpperCAmelCase_ ) , filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(UpperCAmelCase_ ) > 0: return os.path.join(UpperCAmelCase_ , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(UpperCAmelCase_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lowerCamelCase_ = cache_path + ".lock" with FileLock(UpperCAmelCase_ ): # If the download just completed while the lock was activated. if os.path.exists(UpperCAmelCase_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: lowerCamelCase_ = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(UpperCAmelCase_ , "a+b" ) as f: yield f lowerCamelCase_ = _resumable_file_manager if os.path.exists(UpperCAmelCase_ ): lowerCamelCase_ = os.stat(UpperCAmelCase_ ).st_size else: lowerCamelCase_ = 0 else: lowerCamelCase_ = partial(tempfile.NamedTemporaryFile , dir=UpperCAmelCase_ , delete=UpperCAmelCase_ ) lowerCamelCase_ = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" , UpperCAmelCase_ , temp_file.name , ) http_get( UpperCAmelCase_ , UpperCAmelCase_ , proxies=UpperCAmelCase_ , resume_size=UpperCAmelCase_ , user_agent=UpperCAmelCase_ , ) os.replace(temp_file.name , UpperCAmelCase_ ) lowerCamelCase_ = {"url": url, "etag": etag} lowerCamelCase_ = cache_path + ".json" with open(UpperCAmelCase_ , "w" ) as meta_file: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) return cache_path def __snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=None ): lowerCamelCase_ = url.encode("utf-8" ) lowerCamelCase_ = shaaaa(UpperCAmelCase_ ) lowerCamelCase_ = url_hash.hexdigest() if etag: lowerCamelCase_ = etag.encode("utf-8" ) lowerCamelCase_ = shaaaa(UpperCAmelCase_ ) filename += "." 
+ etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : int=False , ): if cache_dir is None: lowerCamelCase_ = TRANSFORMERS_CACHE if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase_ = str(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase_ = str(UpperCAmelCase_ ) if is_remote_url(UpperCAmelCase_ ): # URL, so get it from the cache (downloading if necessary) lowerCamelCase_ = get_from_cache( UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , user_agent=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , ) elif os.path.exists(UpperCAmelCase_ ): # File, and it exists. lowerCamelCase_ = url_or_filename elif urlparse(UpperCAmelCase_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(UpperCAmelCase_ ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(UpperCAmelCase_ ) ) if extract_compressed_file: if not is_zipfile(UpperCAmelCase_ ) and not tarfile.is_tarfile(UpperCAmelCase_ ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" lowerCamelCase_ ,lowerCamelCase_ = os.path.split(UpperCAmelCase_ ) lowerCamelCase_ = output_file.replace("." , "-" ) + "-extracted" lowerCamelCase_ = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) if os.path.isdir(UpperCAmelCase_ ) and os.listdir(UpperCAmelCase_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions lowerCamelCase_ = output_path + ".lock" with FileLock(UpperCAmelCase_ ): shutil.rmtree(UpperCAmelCase_ , ignore_errors=UpperCAmelCase_ ) os.makedirs(UpperCAmelCase_ ) if is_zipfile(UpperCAmelCase_ ): with ZipFile(UpperCAmelCase_ , "r" ) as zip_file: zip_file.extractall(UpperCAmelCase_ ) zip_file.close() elif tarfile.is_tarfile(UpperCAmelCase_ ): lowerCamelCase_ = tarfile.open(UpperCAmelCase_ ) tar_file.extractall(UpperCAmelCase_ ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(UpperCAmelCase_ ) ) return output_path_extracted return output_path def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any]="," ): assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) if os.path.isfile(UpperCAmelCase_ ): with open(UpperCAmelCase_ ) as f: lowerCamelCase_ = eval(f.read() ) else: lowerCamelCase_ = requests.get(UpperCAmelCase_ ) try: lowerCamelCase_ = requests.json() except Exception: lowerCamelCase_ = req.content.decode() assert data is not None, "could not connect" try: lowerCamelCase_ = eval(UpperCAmelCase_ ) except Exception: lowerCamelCase_ = data.split("\n" ) req.close() return data def __snake_case ( UpperCAmelCase_ : Dict ): lowerCamelCase_ = requests.get(UpperCAmelCase_ ) lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def __snake_case ( UpperCAmelCase_ : Optional[int] ): lowerCamelCase_ = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(UpperCAmelCase_ ) with open(UpperCAmelCase_ , "rb" ) as stream: lowerCamelCase_ = pkl.load(UpperCAmelCase_ ) 
lowerCamelCase_ = weights.pop("model" ) lowerCamelCase_ = {} for k, v in model.items(): lowerCamelCase_ = torch.from_numpy(UpperCAmelCase_ ) if "running_var" in k: lowerCamelCase_ = torch.tensor([0] ) lowerCamelCase_ = k.replace("running_var" , "num_batches_tracked" ) lowerCamelCase_ = zero return new def __snake_case ( ): print(F'''{os.path.abspath(os.path.join(UpperCAmelCase_ , os.pardir ) )}/demo.ipynb''' ) def __snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : int="RGB" ): assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) if os.path.isfile(UpperCAmelCase_ ): lowerCamelCase_ = cva.imread(UpperCAmelCase_ ) else: lowerCamelCase_ = get_image_from_url(UpperCAmelCase_ ) assert img is not None, F'''could not connect to: {im}''' lowerCamelCase_ = cva.cvtColor(UpperCAmelCase_ , cva.COLOR_BGR2RGB ) if input_format == "RGB": lowerCamelCase_ = img[:, :, ::-1] return img def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple=1 ): return (images[i : i + batch] for i in range(0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ ))
"""simple docstring""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ): SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """""" # apply OCR SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""] # filter empty words and corresponding coordinates SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()] SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format SCREAMING_SNAKE_CASE__ : List[Any] = [] for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h] actual_boxes.append(_snake_case ) # finally, normalize the bounding boxes SCREAMING_SNAKE_CASE__ : List[str] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) ) assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = ['''pixel_values'''] def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24} SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize 
SCREAMING_SNAKE_CASE__ : Any = size SCREAMING_SNAKE_CASE__ : List[Any] = resample SCREAMING_SNAKE_CASE__ : Dict = apply_ocr SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""]) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if apply_ocr: requires_backends(self , """pytesseract""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] SCREAMING_SNAKE_CASE__ : Dict = [] for image in images: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) words_batch.append(SCREAMING_SNAKE_CASE__ ) boxes_batch.append(SCREAMING_SNAKE_CASE__ ) if do_resize: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ ) if apply_ocr: SCREAMING_SNAKE_CASE__ : List[Any] = words_batch SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch return data
import re


def dna_complement(strand: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", strand)) != len(strand):
        raise ValueError("Invalid Strand")
    return strand.translate(str.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
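# Usage sketch (illustrative): each base maps to its Watson-Crick partner,
# so "AAACCCGGT" complements to "TTTGGGCCA", and any non-ATCG character
# raises ValueError.
assert dna_complement("AAACCCGGT") == "TTTGGGCCA"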
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:] SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() SCREAMING_SNAKE_CASE__ : int = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB ) # Add 0 to make lengths equal a power of 2 SCREAMING_SNAKE_CASE__ : Optional[int] = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE__ ) <= 1: return dft[0] # SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2 while next_ncol > 0: SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol # First half of next step SCREAMING_SNAKE_CASE__ : str = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step SCREAMING_SNAKE_CASE__ : int = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2 return dft[0] def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" ) SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" ) SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 while next_ncol <= self.c_max_length: SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2) SCREAMING_SNAKE_CASE__ : Any = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c next_ncol *= 2 # Unpack SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__(self ) -> List[str]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
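# Usage sketch (illustrative): (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2, so the
# product coefficients come back, lowest order first, as roughly [3, 10, 8]
# (complex entries whose tiny imaginary parts have been rounded away).
#
#     print(FFT([1, 2], [3, 4]))
#     # A = 1*x^0 + 2*x^1
#     # B = 3*x^0 + 4*x^1
#     # A*B = (3+0j)*x^0 + (10+0j)*x^1 + (8+0j)*x^2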
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("List is empty" ) __lowerCAmelCase = sum(_UpperCamelCase ) / len(_UpperCamelCase ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" ,type=_snake_case ,help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) ,) # rest from the training program parser.add_argument("""training_script_args""" ,nargs=_snake_case ) return parser.parse_args() def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : int = parse_args() # Import training_script as a module. SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) SCREAMING_SNAKE_CASE__ : int = script_fpath.stem SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case ) # Patch sys.argv SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores ) if __name__ == "__main__": main()
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
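# Quick check (illustrative): 6 and 28 are perfect numbers, so the sum of
# their proper divisors equals the number itself.
assert sum_of_proper_divisors(6) == 6
assert sum_of_proper_divisors(28) == 28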
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): return 1 if input_a == input_a else 0 def lowercase_ ( ): assert xnor_gate(0 ,0 ) == 1 assert xnor_gate(0 ,1 ) == 0 assert xnor_gate(1 ,0 ) == 0 assert xnor_gate(1 ,1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad up to the next multiple of `size` on the bottom and right edges
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
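# Padding arithmetic sketch (illustrative, mirroring the `pad` method above):
# with pad_size=8, a 13-pixel edge is padded by (13 // 8 + 1) * 8 - 13 = 3,
# so a 13x13 image becomes 16x16. Note the formula always adds at least one
# pixel: an edge that is already a multiple of 8 still gains a full block of 8.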
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib UpperCAmelCase__ : Optional[int] = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } UpperCAmelCase__ : List[Any] = logging.WARNING def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option DATASETS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ', '.join(log_levels.keys() ) }''' ) return _default_log_level def lowercase_ ( ): return __name__.split(""".""" )[0] def lowercase_ ( ): return logging.getLogger(_get_library_name() ) def lowercase_ ( ): # Apply our default configuration to the library root logger. SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def lowercase_ ( _snake_case = None ): if name is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name() return logging.getLogger(_snake_case ) def lowercase_ ( ): return _get_library_root_logger().getEffectiveLevel() def lowercase_ ( _snake_case ): _get_library_root_logger().setLevel(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): return set_verbosity(_snake_case ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Tuple = False def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : str = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowerCAmelCase_ : """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None def __iter__(self ) -> int: """simple docstring""" return iter(self._iterator ) def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument return return empty_fn def __enter__(self ) -> Dict: """simple docstring""" return self def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" return UpperCAmelCase__ : str = True class lowerCAmelCase_ : """simple docstring""" def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) else: return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" if _tqdm_active: return 
tqdm_lib.tqdm.get_lock() UpperCAmelCase__ : Tuple = _tqdm_cls() def lowercase_ ( ): global _tqdm_active return bool(_tqdm_active ) def lowercase_ ( ): global _tqdm_active SCREAMING_SNAKE_CASE__ : Union[str, Any] = True def lowercase_ ( ): global _tqdm_active SCREAMING_SNAKE_CASE__ : str = False
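The logging module above resolves its default verbosity from an environment variable. A self-contained sketch of that pattern (the function and variable names are illustrative, not the datasets API): unknown or missing values fall back to WARNING.

import logging
import os

LOG_LEVELS = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

def get_default_level(env_var: str = "DATASETS_VERBOSITY") -> int:
    # Mirror the env-driven default: pick the configured level, warn on
    # unknown values, and fall back to WARNING.
    value = os.getenv(env_var, "")
    if value and value not in LOG_LEVELS:
        logging.getLogger().warning(
            f"Unknown option {env_var}={value}, has to be one of: {', '.join(LOG_LEVELS)}"
        )
    return LOG_LEVELS.get(value, logging.WARNING)

os.environ["DATASETS_VERBOSITY"] = "info"
print(get_default_level() == logging.INFO)  # True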
25
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int ): lowerCAmelCase : Tuple = inspect.getfile(accelerate.test_utils ) lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) lowerCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def lowerCamelCase__ ( self : int ): print(F'''Found {torch.cuda.device_count()} devices.''' ) lowerCAmelCase : str = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCamelCase__ ( self : List[Any] ): print(F'''Found {torch.cuda.device_count()} devices.''' ) lowerCAmelCase : List[Any] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[int] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCamelCase__ ( self : Dict ): print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) lowerCAmelCase : Tuple = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) if __name__ == "__main__": snake_case__ : List[str] = Accelerator() snake_case__ : Dict = (accelerator.state.process_index + 2, 10) snake_case__ : Dict = torch.randint(0, 10, shape).to(accelerator.device) snake_case__ : Any = '''''' snake_case__ : Tuple = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." snake_case__ : Dict = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." snake_case__ : int = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. 
if len(error_msg) > 0: raise ValueError(error_msg)
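The `__main__` block above exercises `accelerator.pad_across_processes`. Below is a single-process stand-in for the behavior it checks (the helper name is hypothetical; the real API gathers tensor sizes across ranks): every tensor is zero-padded along dim 0 to the longest first dimension, at the end by default or at the front with `pad_first=True`.

from typing import List

import torch

def pad_to_longest(tensors: List[torch.Tensor], pad_first: bool = False) -> List[torch.Tensor]:
    # Zero-pad each tensor along dim 0 up to the largest first dimension.
    max_len = max(t.shape[0] for t in tensors)
    padded = []
    for t in tensors:
        fill = torch.zeros((max_len - t.shape[0], *t.shape[1:]), dtype=t.dtype)
        padded.append(torch.cat([fill, t] if pad_first else [t, fill], dim=0))
    return padded

a, b = torch.ones(2, 10), torch.ones(3, 10)
out = pad_to_longest([a, b])
print(out[0].shape, out[1].shape)  # torch.Size([3, 10]) torch.Size([3, 10])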
60
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : Optional[int] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : int = '''yolos''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE__ : int = num_hidden_layers SCREAMING_SNAKE_CASE__ : str = num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[str] = image_size SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = num_channels SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss # Hungarian matcher SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient SCREAMING_SNAKE_CASE__ : int = eos_coefficient class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Dict = version.parse('''1.11''' ) @property def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __magic_name__ (self ) -> float: """simple docstring""" return 1E-4 @property def __magic_name__ (self ) -> int: """simple docstring""" return 12
25
0
"""simple docstring""" import os import sys _a = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) _a = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def __a ( *__lowerCamelCase, **__lowerCamelCase ): return AutoConfig.from_pretrained(*__lowerCamelCase, **__lowerCamelCase ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __a ( *__lowerCamelCase, **__lowerCamelCase ): return AutoTokenizer.from_pretrained(*__lowerCamelCase, **__lowerCamelCase ) @add_start_docstrings(AutoModel.__doc__ ) def __a ( *__lowerCamelCase, **__lowerCamelCase ): return AutoModel.from_pretrained(*__lowerCamelCase, **__lowerCamelCase ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __a ( *__lowerCamelCase, **__lowerCamelCase ): return AutoModelForCausalLM.from_pretrained(*__lowerCamelCase, **__lowerCamelCase ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __a ( *__lowerCamelCase, **__lowerCamelCase ): return AutoModelForMaskedLM.from_pretrained(*__lowerCamelCase, **__lowerCamelCase ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __a ( *__lowerCamelCase, **__lowerCamelCase ): return AutoModelForSequenceClassification.from_pretrained(*__lowerCamelCase, **__lowerCamelCase ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __a ( *__lowerCamelCase, **__lowerCamelCase ): return AutoModelForQuestionAnswering.from_pretrained(*__lowerCamelCase, **__lowerCamelCase )
61
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = 384 SCREAMING_SNAKE_CASE__ : Tuple = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE__ : int = 96 SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2) SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96 SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE__ : Tuple = 128 SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE__ : Optional[int] = 12 SCREAMING_SNAKE_CASE__ : Optional[int] = 512 elif "large" in model_name: SCREAMING_SNAKE_CASE__ : Optional[Any] = 192 SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48) SCREAMING_SNAKE_CASE__ : List[Any] = 12 SCREAMING_SNAKE_CASE__ : Optional[Any] = 768 # set label information SCREAMING_SNAKE_CASE__ : Optional[Any] = 150 SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files""" SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : str = SwinConfig( embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,) SCREAMING_SNAKE_CASE__ : int = UperNetConfig( backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,) return config def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = val def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :] # fmt: on def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape SCREAMING_SNAKE_CASE__ : List[Any] = 
x.reshape(_snake_case ,4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : int = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[ """state_dict""" ] for name, param in state_dict.items(): print(_snake_case ,param.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case ) if "bn" in key: SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" ) SCREAMING_SNAKE_CASE__ : Dict = val # rename keys SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case ,_snake_case ,_snake_case ) read_in_q_k_v(_snake_case ,config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case ) if "norm" in key: SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case ) model.load_state_dict(_snake_case ) # verify on image SCREAMING_SNAKE_CASE__ : List[str] = 
"""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor() SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits print(logits.shape ) print("""First values of logits:""" ,logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE__ : Dict = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("""Logits:""" ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-swin-tiny', type=str, choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']], help='Name of the Swin + UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase__ : List[str] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
25
0
def solution(limit: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
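A quick sanity check of `solution()` for its default limit: the multiples of 3 or 5 below 1000 sum to 233168 (166833 from the threes plus 99500 from the fives, minus 33165 counted twice for the fifteens), the classic Project Euler #1 answer.

assert sum(e for e in range(3, 1000) if e % 3 == 0 or e % 5 == 0) == 233168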
62
"""simple docstring""" import math import unittest def lowercase_ ( _snake_case ): assert isinstance(_snake_case ,_snake_case ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , ) self.assertFalse( is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
25
0
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
63
"""simple docstring""" def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[int] = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0 SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2 SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3 SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5 for _ in range(1 ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case ) ugly_nums.append(_snake_case ) if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f"""{ugly_numbers(2_0_0) = }""")
25
0
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings A_ = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(__a ) class lowercase( __a ): '''simple docstring''' lowercase__ = "rag" lowercase__ = True def __init__( self: Union[str, Any], a_: int=None, a_: Tuple=True, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, a_: Optional[Any]=" / ", a_: Tuple=" // ", a_: List[Any]=5, a_: Dict=300, a_: Tuple=768, a_: Optional[Any]=8, a_: int="wiki_dpr", a_: Any="train", a_: Optional[int]="compressed", a_: Optional[int]=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: str=False, a_: Dict=0.0, a_: Union[str, Any]=True, a_: Union[str, Any]=False, a_: str=False, a_: List[str]=False, a_: Union[str, Any]=True, a_: Any=None, **a_: List[Any], ): '''simple docstring''' super().__init__( bos_token_id=a_, pad_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, forced_eos_token_id=a_, is_encoder_decoder=a_, prefix=a_, vocab_size=a_, **a_, ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _snake_case : Union[str, Any] = kwargs.pop("""question_encoder""" ) _snake_case : List[str] = question_encoder_config.pop("""model_type""" ) _snake_case : Union[str, Any] = kwargs.pop("""generator""" ) _snake_case : Any = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig _snake_case : Union[str, Any] = AutoConfig.for_model(a_, **a_ ) _snake_case : Optional[Any] = AutoConfig.for_model(a_, **a_ ) _snake_case : Any = reduce_loss _snake_case : Optional[int] = label_smoothing _snake_case : Dict = exclude_bos_score _snake_case : int = do_marginalize _snake_case : Optional[Any] = title_sep _snake_case : Any = doc_sep _snake_case : List[str] = n_docs _snake_case : Tuple = max_combined_length _snake_case : Optional[Any] = dataset _snake_case : Union[str, Any] = dataset_split _snake_case : Tuple = index_name _snake_case : Any = retrieval_vector_size _snake_case : Union[str, Any] = retrieval_batch_size _snake_case : str = passages_path _snake_case : Tuple = index_path _snake_case : List[Any] = use_dummy_dataset _snake_case : Optional[Any] = output_retrieved _snake_case : Tuple = do_deduplication _snake_case : Union[str, Any] = use_cache if self.forced_eos_token_id is None: _snake_case : Dict = getattr(self.generator, """forced_eos_token_id""", a_ ) @classmethod def UpperCamelCase_ ( cls: Any, a_: PretrainedConfig, a_: PretrainedConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : List[str] = self.question_encoder.to_dict() _snake_case : Tuple = self.generator.to_dict() _snake_case : Dict = self.__class__.model_type return output
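A stripped-down sketch of the composite-config pattern `RagConfig` uses above: a parent config owns two child configs and serializes them recursively in `to_dict`. All class and field names here are illustrative, not the transformers API.

class ChildConfig:
    def __init__(self, model_type: str, hidden_size: int = 768):
        self.model_type = model_type
        self.hidden_size = hidden_size

    def to_dict(self) -> dict:
        return dict(self.__dict__)

class CompositeConfig:
    def __init__(self, question_encoder: ChildConfig, generator: ChildConfig):
        self.question_encoder = question_encoder
        self.generator = generator

    def to_dict(self) -> dict:
        # Serialize scalar fields directly and child configs recursively.
        output = {k: v for k, v in self.__dict__.items()
                  if k not in ("question_encoder", "generator")}
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        return output

cfg = CompositeConfig(ChildConfig("dpr"), ChildConfig("bart", 1024))
print(cfg.to_dict()["generator"]["hidden_size"])  # 1024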
64
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase__ : Dict = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer''' def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ : str = num_hidden_layers SCREAMING_SNAKE_CASE__ : int = num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : int = initializer_range SCREAMING_SNAKE_CASE__ : int = layer_norm_eps SCREAMING_SNAKE_CASE__ : Dict = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride SCREAMING_SNAKE_CASE__ : Any = time_stride SCREAMING_SNAKE_CASE__ : Optional[int] = max_length SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
25
0
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
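A minimal sketch of the lazy-import idea behind `_LazyModule` above, using PEP 562's module-level `__getattr__` (names are illustrative; this belongs in a package's `__init__.py`, and the real transformers implementation is considerably more elaborate): the owning submodule is imported only when one of its names is first accessed.

import importlib

_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name: str):
    # Defer the actual import until an exported name is requested.
    for module_name, attrs in _IMPORT_STRUCTURE.items():
        if name in attrs:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")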
65
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase_ ( _snake_case ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" ) SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" ) SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" ) SCREAMING_SNAKE_CASE__ : Tuple = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE__ : Optional[Any] = value return upgrade @torch.no_grad() def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ): if config_path is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case ) else: SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig() SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval() SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case ) if os.path.exists(_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ) SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case ) hf_model.load_state_dict(_snake_case ) SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict() SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case ) SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case ) assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = 
argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
25
0
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __a = logging.get_logger(__name__) __a = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) __a = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) __a = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) __a = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) __a = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) __a = OrderedDict( [ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) __a = OrderedDict( [ # Model for Causal LM 
mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) __a = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) __a = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) __a = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) __a = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) __a = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) __a = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) __a = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) __a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __a = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Optional[int] = FLAX_MODEL_MAPPING __a = auto_class_update(FlaxAutoModel) class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Union[str, Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING __a = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Dict = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __a = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Optional[int] = FLAX_MODEL_FOR_MASKED_LM_MAPPING __a = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Union[str, Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __a = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Union[str, Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __a = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : str = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __a = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : int = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __a = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __a = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __a = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Optional[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __a = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class lowerCamelCase ( _BaseAutoModelClass ): '''simple docstring''' _A : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __a = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class lowerCamelCase ( _BaseAutoModelClass ): 
'''simple docstring''' _A : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __a = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
66
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip' def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,) if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) ) else: SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0] print(our_output.shape ,their_output.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) UpperCAmelCase__ : Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
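The conversion above ends with a numerical parity check between the fairseq and HuggingFace outputs. A self-contained sketch of that gate with stand-in tensors: report the max absolute difference, then require `allclose` within an absolute tolerance.

import torch

torch.manual_seed(0)
theirs = torch.randn(1, 11, 768)                  # stand-in for the fairseq output
ours = theirs + 1e-5 * torch.randn_like(theirs)   # stand-in for the converted model's output

max_abs_diff = torch.max(torch.abs(ours - theirs)).item()
print(f"max_absolute_diff = {max_abs_diff:.2e}")
assert torch.allclose(ours, theirs, atol=1e-3)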
25
0
'''simple docstring''' import tensorflow as tf from ...tf_utils import shape_list class a__ ( tf.keras.layers.Layer ): def __init__( self : Tuple , a : Any , a : Any , a : Optional[Any] , a : Optional[int] , a : str=1 , a : Optional[int]=False , **a : str ): """simple docstring""" super().__init__(**a ) __lowerCamelCase = vocab_size __lowerCamelCase = d_embed __lowerCamelCase = d_proj __lowerCamelCase = cutoffs + [vocab_size] __lowerCamelCase = [0] + self.cutoffs __lowerCamelCase = div_val __lowerCamelCase = self.cutoffs[0] __lowerCamelCase = len(self.cutoffs ) - 1 __lowerCamelCase = self.shortlist_size + self.n_clusters __lowerCamelCase = keep_order __lowerCamelCase = [] __lowerCamelCase = [] def SCREAMING_SNAKE_CASE__ ( self : int , a : int ): """simple docstring""" if self.n_clusters > 0: __lowerCamelCase = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=a , name='''cluster_weight''' ) __lowerCamelCase = self.add_weight( shape=(self.n_clusters,) , initializer='''zeros''' , trainable=a , name='''cluster_bias''' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: __lowerCamelCase = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=a , name=f"""out_projs_._{i}""" , ) self.out_projs.append(a ) else: self.out_projs.append(a ) __lowerCamelCase = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=a , name=f"""out_layers_._{i}_._weight""" , ) __lowerCamelCase = self.add_weight( shape=(self.vocab_size,) , initializer='''zeros''' , trainable=a , name=f"""out_layers_._{i}_._bias""" , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): __lowerCamelCase , __lowerCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] __lowerCamelCase = self.d_embed // (self.div_val**i) __lowerCamelCase = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=a , name=f"""out_projs_._{i}""" ) self.out_projs.append(a ) __lowerCamelCase = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=a , name=f"""out_layers_._{i}_._weight""" , ) __lowerCamelCase = self.add_weight( shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=a , name=f"""out_layers_._{i}_._bias""" , ) self.out_layers.append((weight, bias) ) super().build(a ) @staticmethod def SCREAMING_SNAKE_CASE__ ( a : Optional[Any] , a : Dict , a : Optional[Any] , a : List[str]=None ): """simple docstring""" __lowerCamelCase = x if proj is not None: __lowerCamelCase = tf.einsum('''ibd,ed->ibe''' , a , a ) return tf.einsum('''ibd,nd->ibn''' , a , a ) + b @staticmethod def SCREAMING_SNAKE_CASE__ ( a : List[str] , a : Tuple ): """simple docstring""" __lowerCamelCase = shape_list(a ) __lowerCamelCase = tf.range(lp_size[0] , dtype=target.dtype ) __lowerCamelCase = tf.stack([r, target] , 1 ) return tf.gather_nd(a , a ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Optional[int] , a : Dict , a : int=True , a : Dict=False ): """simple docstring""" __lowerCamelCase = 0 if self.n_clusters == 0: __lowerCamelCase = self._logit(a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: __lowerCamelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=a , logits=a ) __lowerCamelCase = tf.nn.log_softmax(a , axis=-1 ) else: __lowerCamelCase = shape_list(a ) __lowerCamelCase = [] __lowerCamelCase = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): 
__lowerCamelCase , __lowerCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: __lowerCamelCase = (target >= l_idx) & (target < r_idx) __lowerCamelCase = tf.where(a ) __lowerCamelCase = tf.boolean_mask(a , a ) - l_idx if self.div_val == 1: __lowerCamelCase = self.out_layers[0][0][l_idx:r_idx] __lowerCamelCase = self.out_layers[0][1][l_idx:r_idx] else: __lowerCamelCase = self.out_layers[i][0] __lowerCamelCase = self.out_layers[i][1] if i == 0: __lowerCamelCase = tf.concat([cur_W, self.cluster_weight] , 0 ) __lowerCamelCase = tf.concat([cur_b, self.cluster_bias] , 0 ) __lowerCamelCase = self._logit(a , a , a , self.out_projs[0] ) __lowerCamelCase = tf.nn.log_softmax(a ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: __lowerCamelCase = tf.boolean_mask(a , a ) __lowerCamelCase = self._gather_logprob(a , a ) else: __lowerCamelCase = self._logit(a , a , a , self.out_projs[i] ) __lowerCamelCase = tf.nn.log_softmax(a ) __lowerCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster __lowerCamelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(a ) if target is not None: __lowerCamelCase = tf.boolean_mask(a , a ) __lowerCamelCase = tf.boolean_mask(a , a ) __lowerCamelCase = self._gather_logprob(a , a ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(a , -cur_logprob , shape_list(a ) ) __lowerCamelCase = tf.concat(a , axis=-1 ) if target is not None: if return_mean: __lowerCamelCase = tf.reduce_mean(a ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(a ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(a , name=self.name , aggregation='''mean''' if return_mean else '''''' ) return out
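The layer above routes each target id into the head shortlist or one of the tail clusters via `(target >= l_idx) & (target < r_idx)` masks. An illustrative numpy sketch of the same partitioning (cutoff values assumed, not from the source):

```python
import numpy as np

def cluster_of(targets, cutoffs):
    # Cluster 0 is the head shortlist [0, cutoffs[0]);
    # cluster i covers [cutoffs[i-1], cutoffs[i]).
    return np.searchsorted(np.asarray(cutoffs), targets, side="right")

cutoffs = [2000, 10000, 40000]  # vocab_size acts as the implicit final cutoff
targets = np.array([5, 1999, 2000, 39999, 40001])
print(cluster_of(targets, cutoffs))  # -> [0 0 1 2 3]
```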
67
"""simple docstring""" UpperCAmelCase__ : List[str] = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] UpperCAmelCase__ : int = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] UpperCAmelCase__ : Tuple = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] UpperCAmelCase__ : Union[str, Any] = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] UpperCAmelCase__ : str = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] UpperCAmelCase__ : str = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
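The lists above are hand-tuned descending timestep schedules for a 1000-step diffusion model; each starts at 999 and ends at 0. As a hypothetical sketch, the uniform baseline such schedules refine can be generated like this (the hand-clustered variants above are deliberately non-uniform, so this is only an approximation):

```python
import numpy as np

def linear_schedule(num_train_timesteps: int = 1000, num_steps: int = 27) -> list:
    # Evenly spaced, strictly descending integer timesteps from T-1 down to 0.
    return [int(round(t)) for t in np.linspace(num_train_timesteps - 1, 0, num_steps)]

print(linear_schedule())  # [999, 961, 922, ..., 38, 0]
```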
25
0
from PIL import Image def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Image , SCREAMING_SNAKE_CASE_: int ) -> Image: '''simple docstring''' A__ = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level)) def contrast(SCREAMING_SNAKE_CASE_: int ) -> int: return int(1_2_8 + factor * (c - 1_2_8) ) return img.point(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 lowerCAmelCase__ = change_contrast(img, 1_7_0) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
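For concreteness, here is the contrast factor and its effect on a few pixel values (numbers computed here, not taken from the source; `img.point` additionally clamps results to [0, 255] for 8-bit images):

```python
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
print(round(factor, 3))  # ~4.85: a strong contrast boost
for c in (100, 128, 200):
    # 128 is the fixed point; other values are pushed away from it
    print(c, "->", int(128 + factor * (c - 128)))
```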
68
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase__ : List[str] = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCAmelCase__ : List[Any] = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = SavedModel() SCREAMING_SNAKE_CASE__ : Dict = [] with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f: SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(_snake_case )] ) with open(_snake_case ,"""rb""" ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE__ : List[str] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(_snake_case ) if strict and len(_snake_case ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(_snake_case ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*_snake_case ,sep="""\n""" ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) UpperCAmelCase__ : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
25
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
69
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) UpperCAmelCase__ : List[Any] = logging.getLogger() def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case ) Path(_snake_case ).open("""w""" ).writelines(_snake_case ) UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random' UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart' UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowerCAmelCase_ (a__ ): """simple docstring""" def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""] _dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : Optional[Any] = F''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): run_generate() assert Path(SCREAMING_SNAKE_CASE__ ).exists() # os.remove(Path(output_file_name)) def __magic_name__ (self ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" self.run_eval_tester(SCREAMING_SNAKE_CASE__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() SCREAMING_SNAKE_CASE__ : Any = { """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""], """de""": [ """Maschinelles Lernen ist großartig, oder?""", """Ich esse gerne Bananen""", """Morgen ist wieder ein toller Tag!""", ], } SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" ) SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] ) _dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] ) SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization""" SCREAMING_SNAKE_CASE__ : List[Any] = F''' run_eval_search.py {model} 
{str(SCREAMING_SNAKE_CASE__ )} {str(SCREAMING_SNAKE_CASE__ )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] ) with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ): with CaptureStdout() as cs: run_search() SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""] SCREAMING_SNAKE_CASE__ : Any = ["""Info"""] if "translation" in task: expected_strings.append("""bleu""" ) else: expected_strings.extend(SCREAMING_SNAKE_CASE__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(SCREAMING_SNAKE_CASE__ ).exists() os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
25
0
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class UpperCAmelCase ( snake_case_ ): def __init__( self : Any , __snake_case : Any="" , __snake_case : List[Any]="train" ) -> str: assert os.path.isdir(__snake_case ) _lowerCAmelCase = [] _lowerCAmelCase = os.listdir(__snake_case ) for story_filename in story_filenames_list: if "summary" in story_filename: continue _lowerCAmelCase = os.path.join(__snake_case , __snake_case ) if not os.path.isfile(__snake_case ): continue self.documents.append(__snake_case ) def __len__( self : str ) -> List[Any]: return len(self.documents ) def __getitem__( self : List[Any] , __snake_case : Dict ) -> Dict: _lowerCAmelCase = self.documents[idx] _lowerCAmelCase = document_path.split("""/""" )[-1] with open(__snake_case , encoding="""utf-8""" ) as source: _lowerCAmelCase = source.read() _lowerCAmelCase , _lowerCAmelCase = process_story(__snake_case ) return document_name, story_lines, summary_lines def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = list(filter(lambda lowerCAmelCase : len(lowerCAmelCase ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) ) # for some unknown reason some lines miss a period, add it _lowerCAmelCase = [_add_missing_period(lowerCAmelCase ) for line in nonempty_lines] # gather article lines _lowerCAmelCase = [] _lowerCAmelCase = deque(lowerCAmelCase ) while True: try: _lowerCAmelCase = lines.popleft() if element.startswith("""@highlight""" ): break story_lines.append(lowerCAmelCase ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines _lowerCAmelCase = list(filter(lambda lowerCAmelCase : not t.startswith("""@highlight""" ) , lowerCAmelCase ) ) return story_lines, summary_lines def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""] if line.startswith("""@highlight""" ): return line if line[-1] in END_TOKENS: return line return line + "." def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" if len(lowerCAmelCase ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(lowerCAmelCase )) ) return sequence def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = torch.ones_like(lowerCAmelCase ) _lowerCAmelCase = sequence == pad_token_id _lowerCAmelCase = 0 return mask def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [tokenizer.encode(lowerCAmelCase ) for line in story_lines] _lowerCAmelCase = [token for sentence in story_lines_token_ids for token in sentence] _lowerCAmelCase = [tokenizer.encode(lowerCAmelCase ) for line in summary_lines] _lowerCAmelCase = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] for sequence in batch: _lowerCAmelCase = -1 _lowerCAmelCase = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(lowerCAmelCase ) return torch.tensor(lowerCAmelCase )
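A standalone sketch of what the story-processing helper above does: lines before the first `@highlight` marker become the article, and the highlighted lines become the summary.

```python
raw_story = "First sentence.\nSecond sentence.\n@highlight\nThe summary."
lines = [line.strip() for line in raw_story.split("\n") if line.strip()]

story_lines = []
it = iter(lines)
for line in it:
    if line.startswith("@highlight"):
        break
    story_lines.append(line)
# Everything after the first marker, minus further markers, is the summary.
summary_lines = [line for line in it if not line.startswith("@highlight")]

print(story_lines)    # ['First sentence.', 'Second sentence.']
print(summary_lines)  # ['The summary.']
```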
70
"""simple docstring""" UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}] UpperCAmelCase__ : Optional[int] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
25
0
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
71
"""simple docstring""" def lowercase_ ( _snake_case ): if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_snake_case ,_snake_case ): raise TypeError("""Input value must be a 'int' type""" ) return bin(_snake_case ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
25
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase 
: Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return 
super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = [self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
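An illustration of the token layouts the special-token methods above produce for BART (ids assumed to be the usual `<s>` = 0 and `</s>` = 2):

```python
bos, eos = 0, 2
a, b = [10, 11], [20]

single = [bos] + a + [eos]         # build_inputs_with_special_tokens, one sequence
pair = single + [eos] + b + [eos]  # sequence pair: <s> A </s></s> B </s>
print(single)  # [0, 10, 11, 2]
print(pair)    # [0, 10, 11, 2, 2, 20, 2]
```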
72
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCAmelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a__ ) class lowerCAmelCase_ (a__ ): """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) requires_backends(self , """vision""" ) self.check_model_type(SCREAMING_SNAKE_CASE__ ) def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" return {}, {}, {} def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = image.size SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework ) return model_inputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ ) return model_outputs def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy() SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" ) SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = {} SCREAMING_SNAKE_CASE__ : Any = predicted_depth SCREAMING_SNAKE_CASE__ : Dict = depth return output_dict
25
0
import mpmath  # for roots of unity
import numpy as np


class A_:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of one of the two coefficient lists
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the two polynomials via the convolution theorem
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
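A usage sketch for the class above (it relies on the fixes applied to it), cross-checked against direct convolution, since polynomial multiplication is exactly the convolution of the coefficient lists:

```python
import numpy as np

poly_a = [1, 2, 3]        # 1 + 2x + 3x^2
poly_b = [4, 5]           # 4 + 5x
fft = A_(poly_a, poly_b)  # `A_` is the class defined above

print([round(c.real) for c in fft.product])  # [4, 13, 22, 15]
print(np.convolve(poly_a, poly_b))           # [ 4 13 22 15]
```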
73
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = IFPipeline __UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" return self._get_dummy_components() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]: """simple docstring""" if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def __magic_name__ (self ) -> List[str]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_local() def __magic_name__ (self ) -> Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ (self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : int = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) 
).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowercase_ ( ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
25
0
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] ) @pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] ) @pytest.mark.parametrize('revision' , [None, 'v2'] ) def _snake_case ( snake_case__ : Any , snake_case__ : str , snake_case__ : Dict ): A = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ ) assert url == F'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(snake_case__ )}'
74
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) SCREAMING_SNAKE_CASE__ : int = Accelerator() SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ ) try: pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
25
0
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ ='''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' ) return image def a_ ( __snake_case : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ =[] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', 
            '''qformer.embeddings.layernorm.bias''') )
    # fmt: on
    return rename_keys


def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any] ) -> Dict:
    """simple docstring"""
    lowerCamelCase_ =dct.pop(__snake_case )
    lowerCamelCase_ =val


def a_ ( __snake_case : str , __snake_case : Optional[Any] ) -> Any:
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )

        # next, set bias in the state dict
        lowerCamelCase_ =torch.cat((q_bias, torch.zeros_like(__snake_case , requires_grad=__snake_case ), v_bias) )
        lowerCamelCase_ =qkv_bias


def a_ ( __snake_case : List[str] ) -> Any:
    """simple docstring"""
    lowerCamelCase_ =364 if '''coco''' in model_name else 224
    lowerCamelCase_ =InstructBlipVisionConfig(image_size=__snake_case ).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict()
    elif "vicuna-13b" in model_name:
        lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict()
    else:
        raise ValueError('''Model name not supported''' )

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    lowerCamelCase_ =InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
    lowerCamelCase_ =InstructBlipConfig(vision_config=__snake_case , text_config=__snake_case , qformer_config=__snake_case )

    return config, image_size


@torch.no_grad()
def a_ ( __snake_case : Optional[int] , __snake_case : str=None , __snake_case : List[Any]=False ) -> List[str]:
    """simple docstring"""
    lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
    qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )

    if "t5" in model_name:
        lowerCamelCase_ =TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        lowerCamelCase_ =LlamaTokenizerFast.from_pretrained(
            '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
        tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )

    lowerCamelCase_, lowerCamelCase_ =get_blipa_config(__snake_case )
    lowerCamelCase_ =InstructBlipForConditionalGeneration(__snake_case ).eval()

    lowerCamelCase_ ={
        '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
        '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
        '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
        '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
    }

    lowerCamelCase_, lowerCamelCase_ =model_name_to_original[model_name]

    # load original model
    print('''Loading original model...''' )
    lowerCamelCase_ ='''cuda:1''' if torch.cuda.is_available() else '''cpu'''
    lowerCamelCase_ ='''cuda:2''' if torch.cuda.is_available() else '''cpu'''
    lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =load_model_and_preprocess(
        name=__snake_case , model_type=__snake_case , is_eval=__snake_case , device=__snake_case )
    original_model.eval()
    print('''Done!''' )

    # update state dict keys
    lowerCamelCase_ =original_model.state_dict()
    lowerCamelCase_ =create_rename_keys(__snake_case )
    for src, dest in rename_keys:
        rename_key(__snake_case , __snake_case , __snake_case )

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        lowerCamelCase_ =state_dict.pop(__snake_case )
        if key.startswith('''Qformer.bert''' ):
            lowerCamelCase_ =key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            lowerCamelCase_ =key.replace('''self''' , '''attention''' )
        if "llm_proj" in key:
            lowerCamelCase_ =key.replace('''llm_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            lowerCamelCase_ =key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''llm_model''' ):
            lowerCamelCase_ =key.replace('''llm_model''' , '''language_model''' )
        if key.startswith('''t5''' ):
            lowerCamelCase_ =key.replace('''t5''' , '''language''' )
        lowerCamelCase_ =val

    # read in qv biases
    read_in_q_v_bias(__snake_case , __snake_case )

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(__snake_case , strict=__snake_case )

    lowerCamelCase_ =load_demo_image()
    lowerCamelCase_ ='''What is unusual about this image?'''

    # create processor
    lowerCamelCase_ =BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=__snake_case , image_std=__snake_case )
    lowerCamelCase_ =InstructBlipProcessor(
        image_processor=__snake_case ,
        tokenizer=__snake_case ,
        qformer_tokenizer=__snake_case ,
    )
    lowerCamelCase_ =processor(images=__snake_case , text=__snake_case , return_tensors='''pt''' ).to(__snake_case )

    # make sure processor creates exact same pixel values
    lowerCamelCase_ =vis_processors['''eval'''](__snake_case ).unsqueeze(0 ).to(__snake_case )
    lowerCamelCase_ =inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __snake_case )

    original_model.to(__snake_case )
    hf_model.to(__snake_case )
    with torch.no_grad():
        if "vicuna" in model_name:
            lowerCamelCase_ =original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
            lowerCamelCase_ =hf_model(**__snake_case ).logits
        else:
            lowerCamelCase_ =original_model(
                {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
            lowerCamelCase_ =tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(__snake_case )
            lowerCamelCase_ =label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
            lowerCamelCase_ =hf_model(**__snake_case , labels=__snake_case ).logits

    print('''First values of original logits:''' , original_logits[0, :3, :3] )
    print('''First values of HF logits:''' , logits[0, :3, :3] )

    # assert values
    assert original_logits.shape == logits.shape
    lowerCamelCase_ =1e-4 if '''vicuna''' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) , __snake_case , atol=__snake_case )
    print('''Looks ok!''' )

    print('''Generating with original model...''' )
    lowerCamelCase_ =original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )

    # important: we need to cast the weights of the HF model to the appropriate type
    print('''Generating with HF model...''' )
    lowerCamelCase_ =hf_model.generate(
        **__snake_case ,
        do_sample=__snake_case ,
        num_beams=5 ,
        max_length=256 ,
        min_length=1 ,
        top_p=0.9 ,
        repetition_penalty=1.5 ,
        length_penalty=1.0 ,
        temperature=1 ,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        lowerCamelCase_ =2
    print('''Original generation:''' , __snake_case )
    lowerCamelCase_ =processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )
    lowerCamelCase_ =[text.strip() for text in output_text]
    print('''HF generation:''' , __snake_case )

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(__snake_case )
        hf_model.save_pretrained(__snake_case )

    if push_to_hub:
        processor.push_to_hub(F'''Salesforce/{model_name}''' )
        hf_model.push_to_hub(F'''Salesforce/{model_name}''' )


if __name__ == "__main__":
    a_ : Any = argparse.ArgumentParser()
    a_ : Any = [
        """instructblip-vicuna-7b""",
        """instructblip-vicuna-13b""",
        """instructblip-flan-t5-xl""",
        """instructblip-flan-t5-xxl""",
    ]
    parser.add_argument(
        """--model_name""",
        default="""instructblip-flan-t5-xl""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub after converting""",
    )

    a_ : str = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
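# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the `read_in_q_v_bias`
# helper above relies on a BLIP-2/InstructBLIP quirk -- the vision encoder
# stores separate q and v biases but no k bias, while the HF attention layer
# expects a single packed qkv bias. The standalone demo below reproduces that
# packing; the toy hidden size and values are assumptions for illustration.
import torch

hidden_size = 4  # assumed toy dimension
q_bias = torch.ones(hidden_size)
v_bias = torch.full((hidden_size,), 2.0)

# k gets an all-zero bias, so the packed vector is [q | 0 | v]
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden_size,)
print(qkv_bias)  # tensor([1., 1., 1., 1., 0., 0., 0., 0., 2., 2., 2., 2.])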
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) SCREAMING_SNAKE_CASE__ : int = [] # custom device map if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_snake_case ) # compatibility with peft SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Any = load_in_abit SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) # convert param to the right dtype SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" ) SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_snake_case ): param.to(_snake_case ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers( _snake_case ,_snake_case ,modules_to_not_convert=_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map( _snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE__ : Tuple = True SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,) return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ): if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_snake_case ,_snake_case ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE__ : int = get_balanced_memory( _snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,) SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case ) if isinstance(_snake_case ,_snake_case ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE__ : Optional[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ): if modules_to_not_convert is None: SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,): SCREAMING_SNAKE_CASE__ : Tuple = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE__ : Any = [] current_key_name.append(_snake_case ) if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE__ : List[str] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt( module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit( module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) SCREAMING_SNAKE_CASE__ : str = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data bnb_module.requires_grad_(_snake_case ) setattr(_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers( _snake_case ,_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowercase_ ( _snake_case ): # Create a copy of the model with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case ) # For compatibility with Accelerate < 0.18 if isinstance(_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] ) SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0 # Check if it is a base model SCREAMING_SNAKE_CASE__ : Optional[int] = False if hasattr(_snake_case 
,"""base_model_prefix""" ): SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" ) filtered_module_names.append(_snake_case ) return filtered_module_names def lowercase_ ( _snake_case ): for m in model.modules(): if isinstance(_snake_case ,bnb.nn.Linearabit ): return True return False def lowercase_ ( _snake_case ): return next(parameter.parameters() ).device def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case ) SCREAMING_SNAKE_CASE__ : str = param_name SCREAMING_SNAKE_CASE__ : Dict = model if "." in tensor_name: SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" ) for split in splits[:-1]: SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1] # offload weights SCREAMING_SNAKE_CASE__ : List[Any] = False offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case ) if hasattr(module._parameters[tensor_name] ,"""SCB""" ): offload_weight( module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,) else: offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case ) offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ) set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


a_ = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
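# ---------------------------------------------------------------------------
# Hedged sketch (PEP 562, not HF's actual `_LazyModule` helper): a
# module-level __getattr__ gives the same deferred-import behavior. This only
# runs inside a package's __init__.py, and the submodule/symbol names below
# are made up for illustration.
import importlib

_lazy_map = {"TimeSeriesTransformerConfig": "configuration_time_series_transformer"}


def __getattr__(name):
    if name in _lazy_map:
        # the heavy submodule is imported only on first attribute access
        module = importlib.import_module("." + _lazy_map[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")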
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )): raise ValueError("""longest_common_substring() takes two strings for inputs""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case ) SCREAMING_SNAKE_CASE__ : int = len(_snake_case ) SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 for i in range(1 ,texta_length + 1 ): for j in range(1 ,texta_length + 1 ): if texta[i - 1] == texta[j - 1]: SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: SCREAMING_SNAKE_CASE__ : List[Any] = i SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def a_ ( _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Tuple = prime_factors(_lowerCAmelCase ) if is_square_free(_lowerCAmelCase ): return -1 if len(_lowerCAmelCase ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ): SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """""" # apply OCR SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""] # filter empty words and corresponding coordinates SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()] SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format SCREAMING_SNAKE_CASE__ : List[Any] = [] for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h] actual_boxes.append(_snake_case ) # finally, normalize the bounding boxes SCREAMING_SNAKE_CASE__ : List[str] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) ) assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = ['''pixel_values'''] def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24} SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize 
SCREAMING_SNAKE_CASE__ : Any = size SCREAMING_SNAKE_CASE__ : List[Any] = resample SCREAMING_SNAKE_CASE__ : Dict = apply_ocr SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""]) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if apply_ocr: requires_backends(self , """pytesseract""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] SCREAMING_SNAKE_CASE__ : Dict = [] for image in images: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) words_batch.append(SCREAMING_SNAKE_CASE__ ) boxes_batch.append(SCREAMING_SNAKE_CASE__ ) if do_resize: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ ) if apply_ocr: SCREAMING_SNAKE_CASE__ : List[Any] = words_batch SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch return data
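# ---------------------------------------------------------------------------
# Hedged sketch: the OCR path above rescales every pixel box to the 0-1000
# grid that LayoutLM-style models expect. Below is a standalone version of
# that rescaling (the same arithmetic as the `normalize_box` helper; the
# demo function name and the sample numbers are assumptions).
def normalize_box_demo(box, width, height):
    return [
        int(1000 * (box[0] / width)),   # left
        int(1000 * (box[1] / height)),  # top
        int(1000 * (box[2] / width)),   # right
        int(1000 * (box[3] / height)),  # bottom
    ]


# a 50x20 word box at (100, 40) on a 640x480 page
assert normalize_box_demo([100, 40, 150, 60], 640, 480) == [156, 83, 234, 125]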
"""simple docstring""" import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = {"""vocab_file""": """vocab.txt"""} snake_case_ = { """vocab_file""": { """facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""", """facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""", }, } snake_case_ = { """facebook/esm2_t6_8M_UR50D""": 1024, """facebook/esm2_t12_35M_UR50D""": 1024, } def _lowerCAmelCase ( lowercase_ ): with open(lowercase_ , 'r' ) as f: UpperCAmelCase = f.read().splitlines() return [l.strip() for l in lines] class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self :str , lowercase_ :Optional[int] , lowercase_ :List[Any]="<unk>" , lowercase_ :Tuple="<cls>" , lowercase_ :Union[str, Any]="<pad>" , lowercase_ :Tuple="<mask>" , lowercase_ :str="<eos>" , **lowercase_ :List[Any] , ) -> List[Any]: super().__init__(**lowercase_ ) UpperCAmelCase = load_vocab_file(lowercase_ ) UpperCAmelCase = dict(enumerate(self.all_tokens ) ) UpperCAmelCase = {tok: ind for ind, tok in enumerate(self.all_tokens )} UpperCAmelCase = unk_token UpperCAmelCase = cls_token UpperCAmelCase = pad_token UpperCAmelCase = mask_token UpperCAmelCase = eos_token UpperCAmelCase = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :int ) -> str: return self._id_to_token.get(lowercase_ , self.unk_token ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :str ) -> int: return self._token_to_id.get(lowercase_ , self._token_to_id.get(self.unk_token ) ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :int , **lowercase_ :Optional[Any] ) -> int: return text.split() def UpperCAmelCase__ ( self :str , lowercase_ :List[str]=False ) -> Union[str, Any]: return len(self._id_to_token ) def UpperCAmelCase__ ( self :List[str] ) -> Optional[int]: return {token: i for i, token in enumerate(self.all_tokens )} def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str ) -> int: return self._token_to_id.get(lowercase_ , self._token_to_id.get(self.unk_token ) ) def UpperCAmelCase__ ( self :int , lowercase_ :int ) -> str: return self._id_to_token.get(lowercase_ , self.unk_token ) def UpperCAmelCase__ ( self :int , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]: UpperCAmelCase = [self.cls_token_id] UpperCAmelCase = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def UpperCAmelCase__ ( self :str , lowercase_ :List , lowercase_ :Optional[List] = None , lowercase_ :bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] UpperCAmelCase = [1] + ([0] * len(lowercase_ )) + [1] if token_ids_a is not None: mask += [0] * len(lowercase_ ) + [1] return mask def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] , lowercase_ :int ) -> int: UpperCAmelCase = os.path.join(lowercase_ , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' ) with open(lowercase_ , 'w' ) as f: f.write('\n'.join(self.all_tokens ) ) return (vocab_file,) @property def UpperCAmelCase__ ( self :Any ) -> int: return self.get_vocab_size(with_added_tokens=lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :Union[List[str], List[AddedToken]] , lowercase_ :bool = False ) -> int: return super()._add_tokens(lowercase_ , special_tokens=lowercase_ )
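# ---------------------------------------------------------------------------
# Hedged sketch: `build_inputs_with_special_tokens` above wraps sequences as
# <cls> seq <eos> (ESM has no separator token, so pairs reuse <eos>). The
# plain-list version below mirrors that logic; the token ids are arbitrary
# stand-ins, not the real ESM vocabulary ids.
def build_inputs_demo(ids_a, ids_b=None, cls_id=0, eos_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [eos_id]
    # a second sequence always needs EOS to mark the boundary
    return [cls_id] + ids_a + [eos_id] + ids_b + [eos_id]


assert build_inputs_demo([5, 6]) == [0, 5, 6, 2]
assert build_inputs_demo([5, 6], [7]) == [0, 5, 6, 2, 7, 2]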
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:] SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() SCREAMING_SNAKE_CASE__ : int = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB ) # Add 0 to make lengths equal a power of 2 SCREAMING_SNAKE_CASE__ : Optional[int] = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE__ ) <= 1: return dft[0] # SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2 while next_ncol > 0: SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol # First half of next step SCREAMING_SNAKE_CASE__ : str = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step SCREAMING_SNAKE_CASE__ : int = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2 return dft[0] def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" ) SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" ) SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 while next_ncol <= self.c_max_length: SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2) SCREAMING_SNAKE_CASE__ : Any = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c next_ncol *= 2 # Unpack SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__(self ) -> List[str]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring'''

import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def __lowercase ( __lowercase ) -> Dict:
    '''simple docstring'''
    _A = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(__lowercase , __lowercase )


def __lowercase ( __lowercase ) -> List[Any]:
    '''simple docstring'''
    _A = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            _A = s_dict.pop(__lowercase )
        elif "subsample" in key:
            _A = s_dict.pop(__lowercase )


def __lowercase ( __lowercase ) -> Tuple:
    '''simple docstring'''
    _A , _A = emb.weight.shape
    _A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
    _A = emb.weight.data
    return lin_layer


def __lowercase ( __lowercase , __lowercase ) -> List[Any]:
    '''simple docstring'''
    _A = torch.load(__lowercase , map_location="cpu" )
    _A = mam_aaa["args"]
    _A = mam_aaa["model"]
    _A = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(__lowercase )
    rename_keys(__lowercase )

    _A = state_dict["decoder.embed_tokens.weight"].shape[0]

    _A = args.share_decoder_input_output_embed

    _A = [int(__lowercase ) for i in args.conv_kernel_sizes.split("," )]
    _A = SpeechaTextConfig(
        vocab_size=__lowercase ,
        max_source_positions=args.max_source_positions ,
        max_target_positions=args.max_target_positions ,
        encoder_layers=args.encoder_layers ,
        decoder_layers=args.decoder_layers ,
        encoder_attention_heads=args.encoder_attention_heads ,
        decoder_attention_heads=args.decoder_attention_heads ,
        encoder_ffn_dim=args.encoder_ffn_embed_dim ,
        decoder_ffn_dim=args.decoder_ffn_embed_dim ,
        d_model=args.encoder_embed_dim ,
        dropout=args.dropout ,
        attention_dropout=args.attention_dropout ,
        activation_dropout=args.activation_dropout ,
        activation_function="relu" ,
        num_conv_layers=len(__lowercase ) ,
        conv_channels=args.conv_channels ,
        conv_kernel_sizes=__lowercase ,
        input_feat_per_channel=args.input_feat_per_channel ,
        input_channels=args.input_channels ,
        tie_word_embeddings=__lowercase ,
        num_beams=5 ,
        max_length=200 ,
        use_cache=__lowercase ,
        decoder_start_token_id=2 ,
        early_stopping=__lowercase ,
    )

    _A = SpeechaTextForConditionalGeneration(__lowercase )
    _A , _A = model.model.load_state_dict(__lowercase , strict=__lowercase )
    if len(__lowercase ) > 0 and not set(__lowercase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F''' but all the following weights are missing {missing}''' )

    if tie_embeds:
        _A = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        _A = lm_head_weights

    model.save_pretrained(__lowercase )


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    lowerCamelCase_ = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
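# ---------------------------------------------------------------------------
# Hedged sketch: `make_linear_from_emb` above ties the decoder's output
# projection to the token-embedding matrix. The core move is copying the
# (vocab, d_model) embedding weight into a bias-free Linear; the toy sizes
# below are assumptions.
import torch
import torch.nn as nn

emb = nn.Embedding(10, 4)              # toy vocab of 10, d_model 4
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data  # the two modules now share storage
assert torch.equal(lm_head.weight, emb.weight)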
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" ,type=_snake_case ,help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) ,) # rest from the training program parser.add_argument("""training_script_args""" ,nargs=_snake_case ) return parser.parse_args() def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : int = parse_args() # Import training_script as a module. SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) SCREAMING_SNAKE_CASE__ : int = script_fpath.stem SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case ) # Patch sys.argv SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores ) if __name__ == "__main__": main()
'''simple docstring'''


def _UpperCamelCase ( __A , __A ) -> Dict:
    '''simple docstring'''
    UpperCamelCase__ = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def _UpperCamelCase ( __A , __A , __A ) -> Union[str, Any]:
    '''simple docstring'''
    UpperCamelCase__ = 0
    while b > 0:
        if b & 1:
            UpperCamelCase__ = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
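# ---------------------------------------------------------------------------
# Hedged usage note (the two helpers above are assumed to be named
# `binary_multiply` and `binary_mod_multiply` in the original source): both
# run in O(log b) by doubling `a` and halving `b`, adding `a` whenever the
# low bit of `b` is set. Worked example for 3 * 13:
#   b=13 (0b1101): add a=3  -> res=3,  a=6
#   b=6  (0b110):  skip     -> res=3,  a=12
#   b=3  (0b11):   add a=12 -> res=15, a=24
#   b=1  (0b1):    add a=24 -> res=39
# and the modular variant with c=5 yields 39 % 5 == 4 without ever forming
# the full product.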
"""simple docstring""" def lowercase_ ( _snake_case ,_snake_case ): return 1 if input_a == input_a else 0 def lowercase_ ( ): assert xnor_gate(0 ,0 ) == 1 assert xnor_gate(0 ,1 ) == 0 assert xnor_gate(1 ,0 ) == 0 assert xnor_gate(1 ,1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))