Dataset schema (one record = five fields, listed in the order they appear in the rows below):

  code                      string   (length 81 to 54k characters)
  code_codestyle            int64    (0 to 721)
  style_context             string   (length 91 to 41.9k characters)
  style_context_codestyle   int64    (0 to 699)
  label                     int64    (0 or 1)
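The records below follow this schema: a code snippet, its style id, a style-context snippet, the context's style id, and a binary label. A minimal sketch of how such a table could be loaded and inspected with the Hugging Face datasets library follows; the Parquet path data/train.parquet is a placeholder assumption, not part of this dump.

from datasets import load_dataset

# Load the table from a local Parquet export (placeholder path, assumed layout).
ds = load_dataset("parquet", data_files="data/train.parquet", split="train")

# Each record holds a code snippet, its style id, a style-context snippet,
# the context's style id, and a binary label.
example = ds[0]
print(example["code"][:200])        # first characters of the code field
print(example["code_codestyle"])    # an integer style id, e.g. 664
print(example["label"])             # 0 or 1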
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict: # Initialise PyTorch model _lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) _lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
code_codestyle: 664
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = DiTPipeline SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = False def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith('''mps''' ): _lowerCAmelCase = torch.manual_seed(_lowerCAmelCase ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) _lowerCAmelCase = { '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ): _lowerCAmelCase = '''cpu''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase ) _lowerCAmelCase = pipe(**_lowerCAmelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase , 1E-3 ) def __lowerCAmelCase ( self ): self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = 
pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCAmelCase ( self ): _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' F'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-1
style_context_codestyle: 664
label: 1
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata UpperCAmelCase_ = "" if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"): class UpperCAmelCase ( tr.AbstractTransform ): def __init__( self , _lowerCAmelCase = " " ): _lowerCAmelCase = sentence_delimiter def __lowerCAmelCase ( self , _lowerCAmelCase ): return list(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = [] for sent_idx, sentence in enumerate(_lowerCAmelCase ): chars.extend(self.process_string(_lowerCAmelCase ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(_lowerCAmelCase ) - 1: chars.append(self.sentence_delimiter ) return chars UpperCAmelCase_ = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: UpperCAmelCase_ = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) UpperCAmelCase_ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" UpperCAmelCase_ = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. 
The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n" UpperCAmelCase_ = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __lowerCAmelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ): if concatenate_texts: return jiwer.compute_measures( _lowerCAmelCase , _lowerCAmelCase , truth_transform=_lowerCAmelCase , hypothesis_transform=_lowerCAmelCase , )["wer"] _lowerCAmelCase = 0 _lowerCAmelCase = 0 for prediction, reference in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = jiwer.compute_measures( _lowerCAmelCase , _lowerCAmelCase , truth_transform=_lowerCAmelCase , hypothesis_transform=_lowerCAmelCase , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
code_codestyle: 664
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase_ = {"UserAgent": UserAgent().random} def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->dict: _lowerCAmelCase = script.contents[0] _lowerCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCAmelCase : def __init__( self , _lowerCAmelCase ): _lowerCAmelCase = F'''https://www.instagram.com/{username}/''' _lowerCAmelCase = self.get_json() def __lowerCAmelCase ( self ): _lowerCAmelCase = requests.get(self.url , headers=_lowerCAmelCase ).text _lowerCAmelCase = BeautifulSoup(_lowerCAmelCase , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self ): return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self ): return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __lowerCAmelCase ( self ): return self.user_data["username"] @property def __lowerCAmelCase ( self ): return self.user_data["full_name"] @property def __lowerCAmelCase ( self ): return self.user_data["biography"] @property def __lowerCAmelCase ( self ): return self.user_data["business_email"] @property def __lowerCAmelCase ( self ): return self.user_data["external_url"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_followed_by"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_follow"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["profile_pic_url_hd"] @property def __lowerCAmelCase ( self ): return self.user_data["is_verified"] @property def __lowerCAmelCase ( self ): return self.user_data["is_private"] def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions _lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = InstagramUser("github") print(instagram_user) print(F"""{instagram_user.number_of_posts = }""") print(F"""{instagram_user.number_of_followers = }""") print(F"""{instagram_user.number_of_followings = }""") print(F"""{instagram_user.email = }""") print(F"""{instagram_user.website = }""") print(F"""{instagram_user.profile_picture_url = }""") print(F"""{instagram_user.is_verified = }""") print(F"""{instagram_user.is_private = }""")
style_context_codestyle: 664
label: 1
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" UpperCAmelCase_ = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" UpperCAmelCase_ = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __lowerCAmelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=4 , _lowerCAmelCase=False ): _lowerCAmelCase = compute_bleu( reference_corpus=_lowerCAmelCase , translation_corpus=_lowerCAmelCase , max_order=_lowerCAmelCase , smooth=_lowerCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
code_codestyle: 664
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : str )->list[int]: _lowerCAmelCase = int(_SCREAMING_SNAKE_CASE ) # Initialize Result _lowerCAmelCase = [] # Traverse through all denomination for denomination in reversed(_SCREAMING_SNAKE_CASE ): # Find denominations while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ): total_value -= int(_SCREAMING_SNAKE_CASE ) answer.append(_SCREAMING_SNAKE_CASE ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase_ = [] UpperCAmelCase_ = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): UpperCAmelCase_ = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) UpperCAmelCase_ = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] UpperCAmelCase_ = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(F"""Following is minimal change for {value}: """) UpperCAmelCase_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
style_context_codestyle: 664
label: 1
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6_0_0_0 )->List[str]: _lowerCAmelCase = int(round(sample_rate * max_length ) ) if len(_SCREAMING_SNAKE_CASE ) <= sample_length: return wav _lowerCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class UpperCAmelCase : SCREAMING_SNAKE_CASE__ = field(default=snake_case_ ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''A file containing the training audio paths and labels.'''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''A file containing the validation audio paths and labels.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''train''' ,metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } ,) SCREAMING_SNAKE_CASE__ = field( default='''validation''' ,metadata={ '''help''': ( '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\'''' ) } ,) SCREAMING_SNAKE_CASE__ = field( default='''audio''' ,metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} ,) SCREAMING_SNAKE_CASE__ = field( default='''label''' ,metadata={'''help''': '''The name of the dataset column containing the labels. 
Defaults to \'label\''''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) SCREAMING_SNAKE_CASE__ = field( default=2_0 ,metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} ,) @dataclass class UpperCAmelCase : SCREAMING_SNAKE_CASE__ = field( default='''facebook/wav2vec2-base''' ,metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ,) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} ) SCREAMING_SNAKE_CASE__ = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) SCREAMING_SNAKE_CASE__ = field( default=snake_case_ ,metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} ,) def __lowerCAmelCase ( self ): if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , _lowerCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def UpperCAmelCase__ ( )->Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. _lowerCAmelCase = DatasetDict() _lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ''' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'''{", ".join(raw_datasets["train"].column_names )}.''' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. 
''' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'''{", ".join(raw_datasets["train"].column_names )}.''' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _lowerCAmelCase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. _lowerCAmelCase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _lowerCAmelCase = feature_extractor.model_input_names[0] def train_transforms(_SCREAMING_SNAKE_CASE : Any ): _lowerCAmelCase = [] for audio in batch[data_args.audio_column_name]: _lowerCAmelCase = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _lowerCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _lowerCAmelCase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_SCREAMING_SNAKE_CASE : List[str] ): _lowerCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _lowerCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _lowerCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _lowerCAmelCase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _lowerCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names _lowerCAmelCase , _lowerCAmelCase = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = str(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = label # Load the accuracy metric from the datasets package _lowerCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(_SCREAMING_SNAKE_CASE : Dict ): _lowerCAmelCase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _lowerCAmelCase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _lowerCAmelCase = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _lowerCAmelCase = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) # Initialize our trainer _lowerCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: _lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCAmelCase = last_checkpoint _lowerCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _lowerCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
code_codestyle: 664
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict: # Initialise PyTorch model _lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) _lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
style_context_codestyle: 664
label: 1
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class UpperCAmelCase ( unittest.TestCase ): SCREAMING_SNAKE_CASE__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) _lowerCAmelCase = VideoClassificationPipeline(model=_lowerCAmelCase , image_processor=_lowerCAmelCase , top_k=2 ) _lowerCAmelCase = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): for example in examples: _lowerCAmelCase = video_classifier(_lowerCAmelCase ) self.assertEqual( _lowerCAmelCase , [ {'''score''': ANY(_lowerCAmelCase ), '''label''': ANY(_lowerCAmelCase )}, {'''score''': ANY(_lowerCAmelCase ), '''label''': ANY(_lowerCAmelCase )}, ] , ) @require_torch def __lowerCAmelCase ( self ): _lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' _lowerCAmelCase = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) _lowerCAmelCase = pipeline( '''video-classification''' , model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , frame_sampling_rate=4 ) _lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) _lowerCAmelCase = video_classifier(_lowerCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , ) _lowerCAmelCase = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_lowerCAmelCase , decimals=4 ) , [ [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def __lowerCAmelCase ( self ): pass
code_codestyle: 664
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = "Hello world! cécé herlolip" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]: _lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout _lowerCAmelCase = roberta.model.encoder.sentence_encoder _lowerCAmelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings _lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight _lowerCAmelCase = roberta_sent_encoder.embed_positions.weight _lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight _lowerCAmelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _lowerCAmelCase = model.roberta.encoder.layer[i] _lowerCAmelCase = roberta_sent_encoder.layers[i] _lowerCAmelCase = layer.attention _lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight _lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias # self attention _lowerCAmelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _lowerCAmelCase = roberta_layer.self_attn.q_proj.weight _lowerCAmelCase = roberta_layer.self_attn.q_proj.bias _lowerCAmelCase = roberta_layer.self_attn.k_proj.weight _lowerCAmelCase = roberta_layer.self_attn.k_proj.bias _lowerCAmelCase = roberta_layer.self_attn.v_proj.weight _lowerCAmelCase = roberta_layer.self_attn.v_proj.bias # self-attention output _lowerCAmelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _lowerCAmelCase = roberta_layer.self_attn.out_proj.weight _lowerCAmelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _lowerCAmelCase = roberta_layer.final_layer_norm.weight _lowerCAmelCase = roberta_layer.final_layer_norm.bias # intermediate _lowerCAmelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # output _lowerCAmelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # end of layer if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head _lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight _lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias _lowerCAmelCase = roberta.model.encoder.lm_head.weight _lowerCAmelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 _lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0] if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) ) else: _lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) _lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 _lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) UpperCAmelCase_ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
style_context_codestyle: 664
label: 1
UpperCAmelCase_ = 8.31_4462 # Unit - J mol-1 K-1 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float )->float: if moles < 0 or kelvin < 0 or volume < 0: raise ValueError('''Invalid inputs. Enter positive value.''' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float )->float: if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError('''Invalid inputs. Enter positive value.''' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
code_codestyle: 664
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _lowerCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = i / num_diffusion_timesteps _lowerCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class UpperCAmelCase ( snake_case_ ,snake_case_ ): SCREAMING_SNAKE_CASE__ = 1 @register_to_config def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ): if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None: _lowerCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase ) _lowerCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowerCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _lowerCAmelCase = 1.0 - self.betas _lowerCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _lowerCAmelCase = 1.0 # setable values _lowerCAmelCase = None _lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): return sample def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' F''' maximal {self.config.num_train_timesteps} timesteps.''' ) _lowerCAmelCase = num_inference_steps _lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa ) _lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase ) self.timesteps += self.config.steps_offset def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ): # 1. get previous step value (=t+1) _lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _lowerCAmelCase = self.alphas_cumprod[timestep] _lowerCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _lowerCAmelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _lowerCAmelCase = model_output elif self.config.prediction_type == "sample": _lowerCAmelCase = model_output _lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _lowerCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def __len__( self ): return self.config.num_train_timesteps
style_context_codestyle: 664
label: 1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ = { "configuration_clipseg": [ "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPSegConfig", "CLIPSegTextConfig", "CLIPSegVisionConfig", ], "processing_clipseg": ["CLIPSegProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPSegModel", "CLIPSegPreTrainedModel", "CLIPSegTextModel", "CLIPSegVisionModel", "CLIPSegForImageSegmentation", ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 664
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase_ = { "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"], "tokenization_cpmant": ["CpmAntTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", "CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 664
label: 1
import heapq import sys import numpy as np UpperCAmelCase_ = tuple[int, int] class UpperCAmelCase : def __init__( self ): _lowerCAmelCase = [] _lowerCAmelCase = set() def __lowerCAmelCase ( self ): if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def __lowerCAmelCase ( self ): return len(self.elements ) == 0 def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(_lowerCAmelCase ) else: # update # print("update", item) _lowerCAmelCase = [] ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def __lowerCAmelCase ( self , _lowerCAmelCase ): if item in self.set: self.set.remove(_lowerCAmelCase ) _lowerCAmelCase = [] ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def __lowerCAmelCase ( self ): return self.elements[0][1] def __lowerCAmelCase ( self ): ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(_lowerCAmelCase ) return (priority, item) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TPos , _SCREAMING_SNAKE_CASE : TPos )->List[str]: # euclidean distance _lowerCAmelCase = np.array(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = np.array(_SCREAMING_SNAKE_CASE ) return np.linalg.norm(a - b ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TPos , _SCREAMING_SNAKE_CASE : TPos )->List[str]: # integer division by time variable return consistent_heuristic(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) // t def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TPos , _SCREAMING_SNAKE_CASE : TPos )->List[Any]: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TPos , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : TPos , _SCREAMING_SNAKE_CASE : dict[TPos, float] )->List[Any]: _lowerCAmelCase = g_function[start] + Wa * heuristics[i](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return ans def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->List[Any]: _lowerCAmelCase = np.chararray((n, n) ) for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = '''*''' for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): if (j, (n - 1) - i) in blocks: _lowerCAmelCase = '''#''' _lowerCAmelCase = '''-''' _lowerCAmelCase = back_pointer[goal] while x != start: ((_lowerCAmelCase) , (_lowerCAmelCase)) = x # print(x) _lowerCAmelCase = '''-''' _lowerCAmelCase = back_pointer[x] _lowerCAmelCase = '''-''' for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) _lowerCAmelCase = back_pointer[goal] while x != start: print(_SCREAMING_SNAKE_CASE , end=''' ''' ) _lowerCAmelCase = 
back_pointer[x] print(_SCREAMING_SNAKE_CASE ) sys.exit() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TPos )->Optional[int]: if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , )->str: for itera in range(_SCREAMING_SNAKE_CASE ): open_list[itera].remove_element(_SCREAMING_SNAKE_CASE ) # print("s", s) # print("j", j) ((_lowerCAmelCase) , (_lowerCAmelCase)) = s _lowerCAmelCase = (x - 1, y) _lowerCAmelCase = (x + 1, y) _lowerCAmelCase = (x, y + 1) _lowerCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(_SCREAMING_SNAKE_CASE ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = -1 _lowerCAmelCase = float('''inf''' ) if valid(_SCREAMING_SNAKE_CASE ) and g_function[neighbours] > g_function[s] + 1: _lowerCAmelCase = g_function[s] + 1 _lowerCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(_SCREAMING_SNAKE_CASE , key(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) if neighbours not in close_list_inad: for var in range(1 , _SCREAMING_SNAKE_CASE ): if key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) <= Wa * key( _SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): open_list[j].put( _SCREAMING_SNAKE_CASE , key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( )->Optional[Any]: _lowerCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list UpperCAmelCase_ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} UpperCAmelCase_ = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (1_0, 1), (1_1, 1), (1_2, 1), (1_3, 1), (1_4, 1), (1_5, 1), (1_6, 1), (1_7, 1), (1_8, 1), (1_9, 1), ] UpperCAmelCase_ = make_common_ground() UpperCAmelCase_ = blocks_blk # hyper parameters UpperCAmelCase_ = 1 UpperCAmelCase_ = 1 UpperCAmelCase_ = 2_0 UpperCAmelCase_ = 3 # one consistent and two other inconsistent # start and end destination UpperCAmelCase_ = (0, 0) UpperCAmelCase_ = (n - 1, n - 1) UpperCAmelCase_ = 1 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TPos , _SCREAMING_SNAKE_CASE : TPos , _SCREAMING_SNAKE_CASE : int )->List[Any]: _lowerCAmelCase = {start: 0, goal: float('''inf''' )} _lowerCAmelCase = {start: -1, goal: -1} _lowerCAmelCase = [] _lowerCAmelCase = set() for i in range(_SCREAMING_SNAKE_CASE ): open_list.append(PriorityQueue() ) open_list[i].put(_SCREAMING_SNAKE_CASE , key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _lowerCAmelCase = [] _lowerCAmelCase = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , _SCREAMING_SNAKE_CASE ): # print(open_list[0].minkey(), open_list[i].minkey()) if 
open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase , _lowerCAmelCase = open_list[i].top_show() visited.add(_SCREAMING_SNAKE_CASE ) expand_state( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) close_list_inad.append(_SCREAMING_SNAKE_CASE ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = open_list[0].top_show() visited.add(_SCREAMING_SNAKE_CASE ) expand_state( _SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) close_list_anchor.append(_SCREAMING_SNAKE_CASE ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(_SCREAMING_SNAKE_CASE ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
664
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = '''ClapFeatureExtractor''' SCREAMING_SNAKE_CASE__ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: _lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if audios is not None: _lowerCAmelCase = self.feature_extractor( _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None and audios is not None: _lowerCAmelCase = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase ) def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer.model_input_names _lowerCAmelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
664
1
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Tuple )->List[Any]: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): _lowerCAmelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Exception )->bool: _lowerCAmelCase = [ '''CUDA out of memory.''', # CUDA OOM '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM ] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : callable = None , _SCREAMING_SNAKE_CASE : int = 1_2_8 )->Optional[int]: if function is None: return functools.partial(_SCREAMING_SNAKE_CASE , starting_batch_size=_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = starting_batch_size def decorator(*_SCREAMING_SNAKE_CASE : Optional[int] , **_SCREAMING_SNAKE_CASE : Optional[Any] ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _lowerCAmelCase = list(inspect.signature(_SCREAMING_SNAKE_CASE ).parameters.keys() ) # Guard against user error if len(_SCREAMING_SNAKE_CASE ) < (len(_SCREAMING_SNAKE_CASE ) + 1): _lowerCAmelCase = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError('''No executable batch size found, reached zero.''' ) try: return function(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) except Exception as e: if should_reduce_batch_size(_SCREAMING_SNAKE_CASE ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
664
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    # Distribute values into per-value-range buckets, then concatenate the sorted buckets.
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
664
1
from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    # Exactly one of the three quantities must be passed as 0; that quantity is
    # computed from the other two and returned as (name, value).
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''')
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''')
    elif area < 0:
        raise ValueError('''Area cannot be negative''')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
664
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def UpperCAmelCase__ ( )->Any: _lowerCAmelCase = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: _lowerCAmelCase = get_sagemaker_input() else: _lowerCAmelCase = get_cluster_input() return config def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str: if subparsers is not None: _lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE ) parser.add_argument( '''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str: _lowerCAmelCase = get_user_input() if args.config_file is not None: _lowerCAmelCase = args.config_file else: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(_SCREAMING_SNAKE_CASE ) else: config.to_yaml_file(_SCREAMING_SNAKE_CASE ) print(f'''accelerate configuration saved at {config_file}''' ) def UpperCAmelCase__ ( )->List[Any]: _lowerCAmelCase = config_command_parser() _lowerCAmelCase = parser.parse_args() config_command(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
664
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ = { "configuration_time_series_transformer": [ "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimeSeriesTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex UpperCAmelCase_ = 1_0 UpperCAmelCase_ = 2_5_6 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]: if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS: return None _lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE ) for token in set(_SCREAMING_SNAKE_CASE ): min_hash.update(token.encode() ) return min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]: return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0} class UpperCAmelCase : def __init__( self , *, _lowerCAmelCase = 0.85 , ): _lowerCAmelCase = duplication_jaccard_threshold _lowerCAmelCase = NUM_PERM _lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _lowerCAmelCase = defaultdict(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self._index.query(_lowerCAmelCase ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(_lowerCAmelCase , _lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [] for base, duplicates in self._duplicate_clusters.items(): _lowerCAmelCase = [base] + list(_lowerCAmelCase ) # reformat the cluster to be a list of dict _lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(_lowerCAmelCase ) return duplicate_clusters def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = self.get_duplicate_clusters() with open(_lowerCAmelCase , '''w''' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = element _lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str: _lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ): di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float: _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) UpperCAmelCase_ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]: _lowerCAmelCase = [] for elementa in cluster: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold: elementa["copies"] += 1 break else: _lowerCAmelCase = 1 extremes.append(_SCREAMING_SNAKE_CASE ) return extremes def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple: global _shared_dataset _lowerCAmelCase = dataset _lowerCAmelCase = [] _lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ): extremes_list.append(_SCREAMING_SNAKE_CASE ) return extremes_list def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]: _lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} _lowerCAmelCase = {} _lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for extremes in extremes_clusters: for element in extremes: _lowerCAmelCase = element _lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() ) _lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _lowerCAmelCase = element['''base_index'''] in extreme_dict if element["is_extreme"]: _lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies'''] print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) return ds_filter, duplicate_clusters
664
1
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ = { "configuration_trajectory_transformer": [ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrajectoryTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TrajectoryTransformerModel", "TrajectoryTransformerPreTrainedModel", "load_tf_weights_in_trajectory_transformer", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = process _lowerCAmelCase = params def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): _lowerCAmelCase = self.dataset[i] _lowerCAmelCase = self.process(_lowerCAmelCase , **self.params ) return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): _lowerCAmelCase = loader _lowerCAmelCase = infer _lowerCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _lowerCAmelCase = None _lowerCAmelCase = loader_batch_size # Internal bookkeeping _lowerCAmelCase = None _lowerCAmelCase = None def __len__( self ): return len(self.loader ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _lowerCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _lowerCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Convert ModelOutput to tuple first _lowerCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _lowerCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_lowerCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase ) self._loader_batch_index += 1 return result def __lowerCAmelCase ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _lowerCAmelCase = next(self.iterator ) _lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _lowerCAmelCase = processed _lowerCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) _lowerCAmelCase = None return self def __lowerCAmelCase ( self ): if self.subiterator is None: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _lowerCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) _lowerCAmelCase = next(self.subiterator ) return processed class UpperCAmelCase ( snake_case_ ): def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
_lowerCAmelCase = False _lowerCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator while not is_last: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size _lowerCAmelCase = processed _lowerCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator else: _lowerCAmelCase = processed _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) return accumulator class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = key def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return self.dataset[i][self.key] class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = keya _lowerCAmelCase = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
664
1
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
664
import numpy class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _lowerCAmelCase = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _lowerCAmelCase = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _lowerCAmelCase = numpy.random.rand(3 , 1 ) # Real output values provided. _lowerCAmelCase = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _lowerCAmelCase = numpy.zeros(output_array.shape ) def __lowerCAmelCase ( self ): _lowerCAmelCase = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def __lowerCAmelCase ( self ): _lowerCAmelCase = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _lowerCAmelCase = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _lowerCAmelCase = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): for iteration in range(1 , iterations + 1 ): _lowerCAmelCase = self.feedforward() self.back_propagation() if give_loss: _lowerCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def __lowerCAmelCase ( self , _lowerCAmelCase ): 
_lowerCAmelCase = input_arr _lowerCAmelCase = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray: return 1 / (1 + numpy.exp(-value )) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray: return (value) * (1 - (value)) def UpperCAmelCase__ ( )->int: _lowerCAmelCase = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _lowerCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _lowerCAmelCase = TwoHiddenLayerNeuralNetwork( input_array=_SCREAMING_SNAKE_CASE , output_array=_SCREAMING_SNAKE_CASE ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_SCREAMING_SNAKE_CASE , iterations=1_0 , give_loss=_SCREAMING_SNAKE_CASE ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
664
1
import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->str: _lowerCAmelCase = [] for line in lines: _lowerCAmelCase = re.sub(r'''#.*''' , '''''' , _SCREAMING_SNAKE_CASE ) # remove comments if line: filtered_lines.append(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = '''\n'''.join(_SCREAMING_SNAKE_CASE ) # Make a hash from all this code _lowerCAmelCase = full_str.encode('''utf-8''' ) return shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() # get importable module names and hash for caching UpperCAmelCase_ = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase_ = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), ".parquet": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase_ = {"imagefolder", "audiofolder"} # Used to filter data files based on extensions given a module name UpperCAmelCase_ = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(".zip") _MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
664
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["LayoutXLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ): _lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18} _lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_frames _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = crop_size def __lowerCAmelCase ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ): _lowerCAmelCase = VivitImageProcessingTester(self ) @property def __lowerCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
664
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Tuple )->List[Any]: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): _lowerCAmelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Exception )->bool: _lowerCAmelCase = [ '''CUDA out of memory.''', # CUDA OOM '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM ] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : callable = None , _SCREAMING_SNAKE_CASE : int = 1_2_8 )->Optional[int]: if function is None: return functools.partial(_SCREAMING_SNAKE_CASE , starting_batch_size=_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = starting_batch_size def decorator(*_SCREAMING_SNAKE_CASE : Optional[int] , **_SCREAMING_SNAKE_CASE : Optional[Any] ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _lowerCAmelCase = list(inspect.signature(_SCREAMING_SNAKE_CASE ).parameters.keys() ) # Guard against user error if len(_SCREAMING_SNAKE_CASE ) < (len(_SCREAMING_SNAKE_CASE ) + 1): _lowerCAmelCase = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError('''No executable batch size found, reached zero.''' ) try: return function(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) except Exception as e: if should_reduce_batch_size(_SCREAMING_SNAKE_CASE ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
664
1
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class UpperCAmelCase ( unittest.TestCase ): SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) SCREAMING_SNAKE_CASE__ = ['''accelerate''', '''launch'''] SCREAMING_SNAKE_CASE__ = Path.home() / '''.cache/huggingface/accelerate''' SCREAMING_SNAKE_CASE__ = '''default_config.yaml''' SCREAMING_SNAKE_CASE__ = config_folder / config_file SCREAMING_SNAKE_CASE__ = config_folder / '''_default_config.yaml''' SCREAMING_SNAKE_CASE__ = Path('''tests/test_configs''' ) @classmethod def __lowerCAmelCase ( cls ): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def __lowerCAmelCase ( cls ): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def __lowerCAmelCase ( self ): for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ): with self.subTest(config_file=_lowerCAmelCase ): execute_subprocess_async( self.base_cmd + ['''--config_file''', str(_lowerCAmelCase ), self.test_file_path] , env=os.environ.copy() ) def __lowerCAmelCase ( self ): execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() ) class UpperCAmelCase ( unittest.TestCase ): SCREAMING_SNAKE_CASE__ = '''test-tpu''' SCREAMING_SNAKE_CASE__ = '''us-central1-a''' SCREAMING_SNAKE_CASE__ = '''ls''' SCREAMING_SNAKE_CASE__ = ['''accelerate''', '''tpu-config'''] SCREAMING_SNAKE_CASE__ = '''cd /usr/share''' SCREAMING_SNAKE_CASE__ = '''tests/test_samples/test_command_file.sh''' SCREAMING_SNAKE_CASE__ = '''Running gcloud compute tpus tpu-vm ssh''' def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowerCAmelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowerCAmelCase , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=_lowerCAmelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowerCAmelCase , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowerCAmelCase ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCAmelCase , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowerCAmelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowerCAmelCase , ) 
def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--command''', '''echo "Hello World"''', '''--debug''', ] , return_stdout=_lowerCAmelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , _lowerCAmelCase , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowerCAmelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCAmelCase , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command_file''', self.command_file, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=_lowerCAmelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCAmelCase , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowerCAmelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCAmelCase , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--accelerate_version''', '''12.0.0''', '''--debug''', ] , return_stdout=_lowerCAmelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCAmelCase , )
664
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope def __lowerCAmelCase ( self ): _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_config() _lowerCAmelCase = 300 return config def __lowerCAmelCase ( self ): ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = self.prepare_config_and_inputs() 
_lowerCAmelCase = True _lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowerCAmelCase = True _lowerCAmelCase = MraModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , ) _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , ) _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_choices _lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = () def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def __lowerCAmelCase ( self ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) @slow def __lowerCAmelCase ( self ): for model_name in 
MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = MraModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def __lowerCAmelCase ( self ): return @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) _lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
664
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"], "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "AdaptiveEmbedding", "TransfoXLForSequenceClassification", "TransfoXLLMHeadModel", "TransfoXLModel", "TransfoXLPreTrainedModel", "load_tf_weights_in_transfo_xl", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAdaptiveEmbedding", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", "TFTransfoXLMainLayer", "TFTransfoXLModel", "TFTransfoXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
664
1
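A minimal consumer-side sketch of the lazy-import table shown above (an assumption for illustration: a standard `transformers` installation); none of the listed submodules is imported until a name is first accessed.

# Hedged example: TransfoXLConfig resolves through the _LazyModule on first access.
from transformers import TransfoXLConfig

config = TransfoXLConfig(n_layer=2, d_model=128)
print(config.model_type)  # "transfo-xl"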
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex UpperCAmelCase_ = 1_0 UpperCAmelCase_ = 2_5_6 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]: if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS: return None _lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE ) for token in set(_SCREAMING_SNAKE_CASE ): min_hash.update(token.encode() ) return min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]: return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0} class UpperCAmelCase : def __init__( self , *, _lowerCAmelCase = 0.85 , ): _lowerCAmelCase = duplication_jaccard_threshold _lowerCAmelCase = NUM_PERM _lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _lowerCAmelCase = defaultdict(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self._index.query(_lowerCAmelCase ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(_lowerCAmelCase , _lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [] for base, duplicates in self._duplicate_clusters.items(): _lowerCAmelCase = [base] + list(_lowerCAmelCase ) # reformat the cluster to be a list of dict _lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(_lowerCAmelCase ) return duplicate_clusters def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = self.get_duplicate_clusters() with open(_lowerCAmelCase , '''w''' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = element _lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str: _lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ): di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float: _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) UpperCAmelCase_ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]: _lowerCAmelCase = [] for elementa in cluster: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold: elementa["copies"] += 1 break else: _lowerCAmelCase = 1 extremes.append(_SCREAMING_SNAKE_CASE ) return extremes def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple: global _shared_dataset _lowerCAmelCase = dataset _lowerCAmelCase = [] _lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ): extremes_list.append(_SCREAMING_SNAKE_CASE ) return extremes_list def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]: _lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} _lowerCAmelCase = {} _lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for extremes in extremes_clusters: for element in extremes: _lowerCAmelCase = element _lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() ) _lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _lowerCAmelCase = element['''base_index'''] in extreme_dict if element["is_extreme"]: _lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies'''] print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) return ds_filter, duplicate_clusters
664
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ): _lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18} _lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_frames _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = crop_size def __lowerCAmelCase ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ): _lowerCAmelCase = VivitImageProcessingTester(self ) @property def __lowerCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
664
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
664
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n" UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]: def remove_articles(_SCREAMING_SNAKE_CASE : List[str] ): _lowerCAmelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(_SCREAMING_SNAKE_CASE , ''' ''' , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ): _lowerCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE : Optional[int] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Any: return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )->int: _lowerCAmelCase = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 1_0_0 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , 
_SCREAMING_SNAKE_CASE : List[str] )->Optional[int]: _lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams] _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter() for sgram, scount in sgramcounter.items(): _lowerCAmelCase = scount * numref _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter() for cgram, ccount in cgramcounter.items(): _lowerCAmelCase = ccount * numref # KEEP _lowerCAmelCase = sgramcounter_rep & cgramcounter_rep _lowerCAmelCase = keepgramcounter_rep & rgramcounter _lowerCAmelCase = sgramcounter_rep & rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = keeptmpscorea / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _lowerCAmelCase = 0 if keepscore_precision > 0 or keepscore_recall > 0: _lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _lowerCAmelCase = sgramcounter_rep - cgramcounter_rep _lowerCAmelCase = delgramcounter_rep - rgramcounter _lowerCAmelCase = sgramcounter_rep - rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = deltmpscorea / len(_SCREAMING_SNAKE_CASE ) # ADDITION _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 if addscore_precision > 0 or addscore_recall > 0: _lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str )->List[Any]: _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = ssent.split(''' ''' ) _lowerCAmelCase = csent.split(''' ''' ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] for rsent in rsents: _lowerCAmelCase = rsent.split(''' ''' ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(_SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = sum([keepascore, keepascore, 
keepascore, keepascore] ) / 4 _lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4 _lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4 _lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : str = "13a" , _SCREAMING_SNAKE_CASE : bool = True )->int: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: _lowerCAmelCase = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE ) elif tokenizer == "moses": _lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE ) elif tokenizer == "penn": _lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = sentence if not return_str: _lowerCAmelCase = normalized_sent.split() return normalized_sent def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->str: if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _lowerCAmelCase = 0 for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] ) _lowerCAmelCase = sari_score / len(_SCREAMING_SNAKE_CASE ) return 1_0_0 * sari_score def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]="exp" , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=False , )->str: _lowerCAmelCase = len(references[0] ) if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _lowerCAmelCase = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )] _lowerCAmelCase = sacrebleu.corpus_bleu( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __lowerCAmelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 
'''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = {} result.update({'''sari''': compute_sari(sources=_lowerCAmelCase , predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) result.update({'''exact''': compute_em(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) return result
664
1
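A usage sketch mirroring the docstring of the wiki_split metric above (an assumption for illustration: a `datasets` version that still exposes `load_metric`).

import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(results)  # roughly {'sari': 21.8, 'sacrebleu': 14.5, 'exact': 0.0}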
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self , _lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): _lowerCAmelCase = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowerCAmelCase , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sgugger/tiny-distilbert-classification''' _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , only_pretrain_model=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' _lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase ) _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowerCAmelCase , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase , [config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' _lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase ) _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase , [config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , 
inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' _lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase ) _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase , [config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''patrickvonplaten/t5-tiny-random''' _lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase ) _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase , configs=[config] ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_lowerCAmelCase , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase ) _lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=_lowerCAmelCase , save_to_csv=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCAmelCase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(_lowerCAmelCase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(_lowerCAmelCase , '''env.csv''' ) , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''env.csv''' ) ).exists() ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase , '''sequential''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''current''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , 
log_filename=os.path.join(_lowerCAmelCase , '''log.txt''' ) , log_print=_lowerCAmelCase , trace_memory_line_by_line=_lowerCAmelCase , eager_mode=_lowerCAmelCase , multi_process=_lowerCAmelCase , ) _lowerCAmelCase = TensorFlowBenchmark(_lowerCAmelCase ) _lowerCAmelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''log.txt''' ) ).exists() )
664
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["DeiTFeatureExtractor"] UpperCAmelCase_ = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
1
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Mark a function with a single key code so the handler can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark a function with several key codes so the handler can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects the marked methods of a class into its key-handler table."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one character and dispatch it to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild the class through the KeyHandler metaclass so its marked methods are registered."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
664
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}

compute_ap(data)
664
1
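A minimal check of the articulation-point routine above (assuming the cleaned-up compute_ap helper shown earlier in this dump): in the path graph 0-1-2 only the middle vertex is a cut vertex.

path_graph = {0: [1], 1: [0, 2], 2: [1]}
compute_ap(path_graph)  # prints: 1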
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # Evict the least recently used key when the cache is full, then move x to the front.
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
664
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCAmelCase ( snake_case_ ): def __lowerCAmelCase ( self ): _lowerCAmelCase = SMALL_MODEL_IDENTIFIER _lowerCAmelCase = '''pt''' _lowerCAmelCase = '''tf''' def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase ) model_tf.save_pretrained(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''mock_framework''' # Framework provided - return whatever the user provides _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch( '''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # Both not in environment -> raise error _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) _lowerCAmelCase = 
MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch( '''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
664
1
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[list[int | float]] )->int: _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = len(matrix[0] ) _lowerCAmelCase = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for row in range(_SCREAMING_SNAKE_CASE ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , _SCREAMING_SNAKE_CASE ): _lowerCAmelCase = matrix[col][row] / matrix[row][row] for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows _lowerCAmelCase = True for i in range(row + 1 , _SCREAMING_SNAKE_CASE ): if matrix[i][row] != 0: _lowerCAmelCase , _lowerCAmelCase = matrix[i], matrix[row] _lowerCAmelCase = False break if reduce: rank -= 1 for i in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
664
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = DiTPipeline SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = False def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith('''mps''' ): _lowerCAmelCase = torch.manual_seed(_lowerCAmelCase ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) _lowerCAmelCase = { '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ): _lowerCAmelCase = '''cpu''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase ) _lowerCAmelCase = pipe(**_lowerCAmelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase , 1E-3 ) def __lowerCAmelCase ( self ): self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = 
pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCAmelCase ( self ): _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' F'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-1
664
1
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _lowerCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = i / num_diffusion_timesteps _lowerCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class UpperCAmelCase ( snake_case_ ,snake_case_ ): SCREAMING_SNAKE_CASE__ = 1 @register_to_config def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ): if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None: _lowerCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase ) _lowerCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowerCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _lowerCAmelCase = 1.0 - self.betas _lowerCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _lowerCAmelCase = 1.0 # setable values _lowerCAmelCase = None _lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): return sample def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' F''' maximal {self.config.num_train_timesteps} timesteps.''' ) _lowerCAmelCase = num_inference_steps _lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa ) _lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase ) self.timesteps += self.config.steps_offset def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ): # 1. get previous step value (=t+1) _lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _lowerCAmelCase = self.alphas_cumprod[timestep] _lowerCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _lowerCAmelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _lowerCAmelCase = model_output elif self.config.prediction_type == "sample": _lowerCAmelCase = model_output _lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _lowerCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def __len__( self ): return self.config.num_train_timesteps
664
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase_ = {"UserAgent": UserAgent().random} def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->dict: _lowerCAmelCase = script.contents[0] _lowerCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCAmelCase : def __init__( self , _lowerCAmelCase ): _lowerCAmelCase = F'''https://www.instagram.com/{username}/''' _lowerCAmelCase = self.get_json() def __lowerCAmelCase ( self ): _lowerCAmelCase = requests.get(self.url , headers=_lowerCAmelCase ).text _lowerCAmelCase = BeautifulSoup(_lowerCAmelCase , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self ): return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self ): return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __lowerCAmelCase ( self ): return self.user_data["username"] @property def __lowerCAmelCase ( self ): return self.user_data["full_name"] @property def __lowerCAmelCase ( self ): return self.user_data["biography"] @property def __lowerCAmelCase ( self ): return self.user_data["business_email"] @property def __lowerCAmelCase ( self ): return self.user_data["external_url"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_followed_by"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_follow"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["profile_pic_url_hd"] @property def __lowerCAmelCase ( self ): return self.user_data["is_verified"] @property def __lowerCAmelCase ( self ): return self.user_data["is_private"] def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions _lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = InstagramUser("github") print(instagram_user) print(F"""{instagram_user.number_of_posts = }""") print(F"""{instagram_user.number_of_followers = }""") print(F"""{instagram_user.number_of_followings = }""") print(F"""{instagram_user.email = }""") print(F"""{instagram_user.website = }""") print(F"""{instagram_user.profile_picture_url = }""") print(F"""{instagram_user.is_verified = }""") print(F"""{instagram_user.is_private = }""")
664
1
from __future__ import annotations def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list )->list: if len(_SCREAMING_SNAKE_CASE ) == 0: return [] _lowerCAmelCase , _lowerCAmelCase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = int(max_value - min_value ) + 1 _lowerCAmelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )] for i in my_list: buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE ) return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
664
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : str )->list[int]: _lowerCAmelCase = int(_SCREAMING_SNAKE_CASE ) # Initialize Result _lowerCAmelCase = [] # Traverse through all denomination for denomination in reversed(_SCREAMING_SNAKE_CASE ): # Find denominations while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ): total_value -= int(_SCREAMING_SNAKE_CASE ) answer.append(_SCREAMING_SNAKE_CASE ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase_ = [] UpperCAmelCase_ = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): UpperCAmelCase_ = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) UpperCAmelCase_ = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] UpperCAmelCase_ = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(F"""Following is minimal change for {value}: """) UpperCAmelCase_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
664
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , ): _lowerCAmelCase = size if size is not None else {'''shortest_edge''': 20} _lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_flip_channel_order def __lowerCAmelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = MobileViTImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ): _lowerCAmelCase = MobileViTImageProcessingTester(self ) @property def __lowerCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''center_crop''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_flip_channel_order''' ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __lowerCAmelCase ( self ): pass def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
664
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict: # Initialise PyTorch model _lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) _lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
664
1
from string import ascii_uppercase UpperCAmelCase_ = {char: i for i, char in enumerate(ascii_uppercase)} UpperCAmelCase_ = dict(enumerate(ascii_uppercase)) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->str: _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 while True: if x == i: _lowerCAmelCase = 0 if len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ): break key += key[i] i += 1 return key def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->str: _lowerCAmelCase = '''''' _lowerCAmelCase = 0 for letter in message: if letter == " ": cipher_text += " " else: _lowerCAmelCase = (dicta[letter] - dicta[key_new[i]]) % 2_6 i += 1 cipher_text += dicta[x] return cipher_text def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->str: _lowerCAmelCase = '''''' _lowerCAmelCase = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: _lowerCAmelCase = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6 i += 1 or_txt += dicta[x] return or_txt def UpperCAmelCase__ ( )->None: _lowerCAmelCase = '''THE GERMAN ATTACK''' _lowerCAmelCase = '''SECRET''' _lowerCAmelCase = generate_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = cipher_text(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'''Encrypted Text = {s}''' ) print(f'''Original Text = {original_text(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
664
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = "Hello world! cécé herlolip" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]: _lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout _lowerCAmelCase = roberta.model.encoder.sentence_encoder _lowerCAmelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings _lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight _lowerCAmelCase = roberta_sent_encoder.embed_positions.weight _lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight _lowerCAmelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _lowerCAmelCase = model.roberta.encoder.layer[i] _lowerCAmelCase = roberta_sent_encoder.layers[i] _lowerCAmelCase = layer.attention _lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight _lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias # self attention _lowerCAmelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _lowerCAmelCase = roberta_layer.self_attn.q_proj.weight _lowerCAmelCase = roberta_layer.self_attn.q_proj.bias _lowerCAmelCase = roberta_layer.self_attn.k_proj.weight _lowerCAmelCase = roberta_layer.self_attn.k_proj.bias _lowerCAmelCase = roberta_layer.self_attn.v_proj.weight _lowerCAmelCase = roberta_layer.self_attn.v_proj.bias # self-attention output _lowerCAmelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _lowerCAmelCase = roberta_layer.self_attn.out_proj.weight _lowerCAmelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _lowerCAmelCase = roberta_layer.final_layer_norm.weight _lowerCAmelCase = roberta_layer.final_layer_norm.bias # intermediate _lowerCAmelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # output _lowerCAmelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # end of layer if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head _lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight _lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias _lowerCAmelCase = roberta.model.encoder.lm_head.weight _lowerCAmelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 _lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0] if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) ) else: _lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) _lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 _lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) UpperCAmelCase_ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
664
1
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = ['''image_processor''', '''tokenizer'''] SCREAMING_SNAKE_CASE__ = '''BlipImageProcessor''' SCREAMING_SNAKE_CASE__ = '''AutoTokenizer''' def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = False super().__init__(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = self.image_processor def __call__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = True , _lowerCAmelCase = None , **_lowerCAmelCase , ): if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: _lowerCAmelCase = self.tokenizer _lowerCAmelCase = self.tokenizer( text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , ) return text_encoding # add pixel_values _lowerCAmelCase = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) if text is not None: _lowerCAmelCase = self.tokenizer( text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , ) else: _lowerCAmelCase = None if text_encoding is not None: encoding_image_processor.update(_lowerCAmelCase ) return encoding_image_processor def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer.model_input_names _lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
664
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _lowerCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = i / num_diffusion_timesteps _lowerCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class UpperCAmelCase ( snake_case_ ,snake_case_ ): SCREAMING_SNAKE_CASE__ = 1 @register_to_config def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ): if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None: _lowerCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase ) _lowerCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowerCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _lowerCAmelCase = 1.0 - self.betas _lowerCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _lowerCAmelCase = 1.0 # setable values _lowerCAmelCase = None _lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): return sample def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' F''' maximal {self.config.num_train_timesteps} timesteps.''' ) _lowerCAmelCase = num_inference_steps _lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa ) _lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase ) self.timesteps += self.config.steps_offset def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ): # 1. get previous step value (=t+1) _lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _lowerCAmelCase = self.alphas_cumprod[timestep] _lowerCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _lowerCAmelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _lowerCAmelCase = model_output elif self.config.prediction_type == "sample": _lowerCAmelCase = model_output _lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _lowerCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def __len__( self ): return self.config.num_train_timesteps
664
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] )->List[Any]: _lowerCAmelCase = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class UpperCAmelCase ( snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = StableDiffusionLatentUpscalePipeline SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''height''', '''width''', '''cross_attention_kwargs''', '''negative_prompt_embeds''', '''prompt_embeds''', } SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''} SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE__ = frozenset([] ) SCREAMING_SNAKE_CASE__ = True @property def __lowerCAmelCase ( self ): _lowerCAmelCase = 1 _lowerCAmelCase = 4 _lowerCAmelCase = (16, 16) _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase ) return image def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_lowerCAmelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=_lowerCAmelCase , only_cross_attention=_lowerCAmelCase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) _lowerCAmelCase = EulerDiscreteScheduler(prediction_type='''sample''' ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''quick_gelu''' , projection_dim=512 , ) _lowerCAmelCase = CLIPTextModel(_lowerCAmelCase ) _lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _lowerCAmelCase = { '''unet''': 
model.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith('''mps''' ): _lowerCAmelCase = torch.manual_seed(_lowerCAmelCase ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) _lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': self.dummy_image.cpu(), '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ): _lowerCAmelCase = '''cpu''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase ) _lowerCAmelCase = pipe(**_lowerCAmelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 256, 256, 3) ) _lowerCAmelCase = np.array( [0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase , 1E-3 ) def __lowerCAmelCase ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def __lowerCAmelCase ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def __lowerCAmelCase ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def __lowerCAmelCase ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def __lowerCAmelCase ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase ) _lowerCAmelCase = 2 _lowerCAmelCase = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue _lowerCAmelCase = getattr(_lowerCAmelCase , scheduler_enum.name ) _lowerCAmelCase = scheduler_cls.from_config(pipe.scheduler.config ) _lowerCAmelCase = pipe(**_lowerCAmelCase )[0] outputs.append(_lowerCAmelCase ) assert check_same_shape(_lowerCAmelCase ) @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.manual_seed(33 ) _lowerCAmelCase = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _lowerCAmelCase = 
StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) _lowerCAmelCase = '''a photo of an astronaut high resolution, unreal engine, ultra realistic''' _lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , output_type='''latent''' ).images _lowerCAmelCase = upscaler( prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_lowerCAmelCase , output_type='''np''' , ).images[0] _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5E-2 def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.manual_seed(33 ) _lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) _lowerCAmelCase = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas''' _lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) _lowerCAmelCase = upscaler( prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_lowerCAmelCase , output_type='''np''' , ).images[0] _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-2
664
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase_ = { "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"], "tokenization_cpmant": ["CpmAntTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", "CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
1
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any )->Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = image.size _lowerCAmelCase , _lowerCAmelCase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32 _lowerCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) _lowerCAmelCase = np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 255.0 _lowerCAmelCase = image[None].transpose(0 , 3 , 1 , 2 ) _lowerCAmelCase = torch.from_numpy(_SCREAMING_SNAKE_CASE ) return 2.0 * image - 1.0 class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): super().__init__() self.register_modules(vqvae=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) @torch.no_grad() def __call__( self , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = 100 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , ): if isinstance(_lowerCAmelCase , PIL.Image.Image ): _lowerCAmelCase = 1 elif isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = image.shape[0] else: raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_lowerCAmelCase )}''' ) if isinstance(_lowerCAmelCase , PIL.Image.Image ): _lowerCAmelCase = preprocess(_lowerCAmelCase ) _lowerCAmelCase , _lowerCAmelCase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _lowerCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width) _lowerCAmelCase = next(self.unet.parameters() ).dtype _lowerCAmelCase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase ) _lowerCAmelCase = image.to(device=self.device , dtype=_lowerCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_lowerCAmelCase , device=self.device ) _lowerCAmelCase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _lowerCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _lowerCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _lowerCAmelCase = {} if accepts_eta: _lowerCAmelCase = eta for t in self.progress_bar(_lowerCAmelCase ): # concat latents and low resolution image in the channel dimension. 
_lowerCAmelCase = torch.cat([latents, image] , dim=1 ) _lowerCAmelCase = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase ) # predict the noise residual _lowerCAmelCase = self.unet(_lowerCAmelCase , _lowerCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample # decode the image latents with the VQVAE _lowerCAmelCase = self.vqvae.decode(_lowerCAmelCase ).sample _lowerCAmelCase = torch.clamp(_lowerCAmelCase , -1.0 , 1.0 ) _lowerCAmelCase = image / 2 + 0.5 _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCAmelCase = self.numpy_to_pil(_lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCAmelCase )
664
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    # Wraps a CLAP feature extractor and a Roberta tokenizer into a single processor.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
664
1
import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=False )->int: _lowerCAmelCase = OmegaConf.load(_SCREAMING_SNAKE_CASE ) if display: print(yaml.dump(OmegaConf.to_container(_SCREAMING_SNAKE_CASE ) ) ) return config def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None )->Union[str, Any]: if conf_path is None: _lowerCAmelCase = '''./model_checkpoints/vqgan_only.yaml''' _lowerCAmelCase = load_config(_SCREAMING_SNAKE_CASE , display=_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = VQModel(**config.model.params ) if ckpt_path is None: _lowerCAmelCase = '''./model_checkpoints/vqgan_only.pt''' _lowerCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE ) if ".ckpt" in ckpt_path: _lowerCAmelCase = sd['''state_dict'''] model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) del sd return model def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->int: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = model.encode(_SCREAMING_SNAKE_CASE ) print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' ) _lowerCAmelCase = model.decode(_SCREAMING_SNAKE_CASE ) return xrec def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any=False )->str: _lowerCAmelCase , _lowerCAmelCase = string.rsplit('''.''' , 1 ) if reload: _lowerCAmelCase = importlib.import_module(_SCREAMING_SNAKE_CASE ) importlib.reload(_SCREAMING_SNAKE_CASE ) return getattr(importlib.import_module(_SCREAMING_SNAKE_CASE , package=_SCREAMING_SNAKE_CASE ) , cls ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->Dict: if "target" not in config: raise KeyError('''Expected key `target` to instantiate.''' ) return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int=True , _SCREAMING_SNAKE_CASE : Optional[int]=True )->List[str]: _lowerCAmelCase = instantiate_from_config(_SCREAMING_SNAKE_CASE ) if sd is not None: model.load_state_dict(_SCREAMING_SNAKE_CASE ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] )->Optional[Any]: # load the specified checkpoint if ckpt: _lowerCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _lowerCAmelCase = pl_sd['''global_step'''] print(f'''loaded model from global step {global_step}.''' ) else: _lowerCAmelCase = {'''state_dict''': None} _lowerCAmelCase = None _lowerCAmelCase = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=_SCREAMING_SNAKE_CASE , eval_mode=_SCREAMING_SNAKE_CASE )['''model'''] return model, global_step
664
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Distribute values into buckets keyed by their offset from the minimum,
    sort each bucket, then concatenate the buckets back into one list."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
664
1
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return the shortest path from `start` to `goal` found by breadth-first search,
    or an empty list if the two nodes are not connected."""
    # keep track of explored nodes and of all the paths still to be checked
    explored = set()
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keep looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue and take its last node
        path = queue.pop(0)
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path between `start` and `target`,
    or -1 if no such path exists."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
664
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def UpperCAmelCase__ ( )->Any: _lowerCAmelCase = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: _lowerCAmelCase = get_sagemaker_input() else: _lowerCAmelCase = get_cluster_input() return config def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str: if subparsers is not None: _lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE ) parser.add_argument( '''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str: _lowerCAmelCase = get_user_input() if args.config_file is not None: _lowerCAmelCase = args.config_file else: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(_SCREAMING_SNAKE_CASE ) else: config.to_yaml_file(_SCREAMING_SNAKE_CASE ) print(f'''accelerate configuration saved at {config_file}''' ) def UpperCAmelCase__ ( )->List[Any]: _lowerCAmelCase = config_command_parser() _lowerCAmelCase = parser.parse_args() config_command(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
664
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCAmelCase ( self ): _lowerCAmelCase = 1 _lowerCAmelCase = 3 _lowerCAmelCase = (32, 32) _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase ) return image @property def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=_lowerCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , ) return CLIPTextModel(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.dummy_cond_unet_upscale _lowerCAmelCase = DDPMScheduler() _lowerCAmelCase = DDIMScheduler(prediction_type='''v_prediction''' ) _lowerCAmelCase = self.dummy_vae _lowerCAmelCase = self.dummy_text_encoder _lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase = StableDiffusionUpscalePipeline( unet=_lowerCAmelCase , low_res_scheduler=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , max_noise_level=350 , ) _lowerCAmelCase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = '''A painting of a squirrel eating a burger''' _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) _lowerCAmelCase = sd_pipe( [prompt] , image=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) _lowerCAmelCase = output.images 
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) _lowerCAmelCase = sd_pipe( [prompt] , image=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=_lowerCAmelCase , )[0] _lowerCAmelCase = image[0, -3:, -3:, -1] _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ): _lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.dummy_cond_unet_upscale _lowerCAmelCase = DDPMScheduler() _lowerCAmelCase = DDIMScheduler(prediction_type='''v_prediction''' ) _lowerCAmelCase = self.dummy_vae _lowerCAmelCase = self.dummy_text_encoder _lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase = StableDiffusionUpscalePipeline( unet=_lowerCAmelCase , low_res_scheduler=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , max_noise_level=350 , ) _lowerCAmelCase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = '''A painting of a squirrel eating a burger''' _lowerCAmelCase = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) _lowerCAmelCase = output.images assert image.shape[0] == 2 _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) _lowerCAmelCase = sd_pipe( [prompt] , image=_lowerCAmelCase , generator=_lowerCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) _lowerCAmelCase = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.dummy_cond_unet_upscale _lowerCAmelCase = DDPMScheduler() _lowerCAmelCase = DDIMScheduler(prediction_type='''v_prediction''' ) _lowerCAmelCase = self.dummy_vae _lowerCAmelCase = self.dummy_text_encoder _lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase = unet.half() _lowerCAmelCase = text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase = StableDiffusionUpscalePipeline( unet=_lowerCAmelCase , low_res_scheduler=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , max_noise_level=350 , ) _lowerCAmelCase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = '''A painting of a squirrel eating a 
burger''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = sd_pipe( [prompt] , image=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='''np''' , ).images _lowerCAmelCase = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ): _lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale''' '''/upsampled_cat.npy''' ) _lowerCAmelCase = '''stabilityai/stable-diffusion-x4-upscaler''' _lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase = '''a cat sitting on a park bench''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( prompt=_lowerCAmelCase , image=_lowerCAmelCase , generator=_lowerCAmelCase , output_type='''np''' , ) _lowerCAmelCase = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def __lowerCAmelCase ( self ): _lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale''' '''/upsampled_cat_fp16.npy''' ) _lowerCAmelCase = '''stabilityai/stable-diffusion-x4-upscaler''' _lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained( _lowerCAmelCase , torch_dtype=torch.floataa , ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase = '''a cat sitting on a park bench''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( prompt=_lowerCAmelCase , image=_lowerCAmelCase , generator=_lowerCAmelCase , output_type='''np''' , ) _lowerCAmelCase = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __lowerCAmelCase ( self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) _lowerCAmelCase = '''stabilityai/stable-diffusion-x4-upscaler''' _lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained( _lowerCAmelCase , torch_dtype=torch.floataa , ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase = '''a cat sitting on a park bench''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( prompt=_lowerCAmelCase , image=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , output_type='''np''' , ) _lowerCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
664
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex UpperCAmelCase_ = 1_0 UpperCAmelCase_ = 2_5_6 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]: if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS: return None _lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE ) for token in set(_SCREAMING_SNAKE_CASE ): min_hash.update(token.encode() ) return min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]: return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0} class UpperCAmelCase : def __init__( self , *, _lowerCAmelCase = 0.85 , ): _lowerCAmelCase = duplication_jaccard_threshold _lowerCAmelCase = NUM_PERM _lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _lowerCAmelCase = defaultdict(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self._index.query(_lowerCAmelCase ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(_lowerCAmelCase , _lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [] for base, duplicates in self._duplicate_clusters.items(): _lowerCAmelCase = [base] + list(_lowerCAmelCase ) # reformat the cluster to be a list of dict _lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(_lowerCAmelCase ) return duplicate_clusters def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = self.get_duplicate_clusters() with open(_lowerCAmelCase , '''w''' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = element _lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str: _lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ): di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float: _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) UpperCAmelCase_ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]: _lowerCAmelCase = [] for elementa in cluster: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold: elementa["copies"] += 1 break else: _lowerCAmelCase = 1 extremes.append(_SCREAMING_SNAKE_CASE ) return extremes def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple: global _shared_dataset _lowerCAmelCase = dataset _lowerCAmelCase = [] _lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ): extremes_list.append(_SCREAMING_SNAKE_CASE ) return extremes_list def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]: _lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} _lowerCAmelCase = {} _lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for extremes in extremes_clusters: for element in extremes: _lowerCAmelCase = element _lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() ) _lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _lowerCAmelCase = element['''base_index'''] in extreme_dict if element["is_extreme"]: _lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies'''] print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) return ds_filter, duplicate_clusters
664
1
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
664
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = process _lowerCAmelCase = params def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): _lowerCAmelCase = self.dataset[i] _lowerCAmelCase = self.process(_lowerCAmelCase , **self.params ) return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): _lowerCAmelCase = loader _lowerCAmelCase = infer _lowerCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _lowerCAmelCase = None _lowerCAmelCase = loader_batch_size # Internal bookkeeping _lowerCAmelCase = None _lowerCAmelCase = None def __len__( self ): return len(self.loader ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _lowerCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _lowerCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Convert ModelOutput to tuple first _lowerCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _lowerCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_lowerCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase ) self._loader_batch_index += 1 return result def __lowerCAmelCase ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _lowerCAmelCase = next(self.iterator ) _lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _lowerCAmelCase = processed _lowerCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) _lowerCAmelCase = None return self def __lowerCAmelCase ( self ): if self.subiterator is None: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _lowerCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) _lowerCAmelCase = next(self.subiterator ) return processed class UpperCAmelCase ( snake_case_ ): def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
_lowerCAmelCase = False _lowerCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator while not is_last: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size _lowerCAmelCase = processed _lowerCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator else: _lowerCAmelCase = processed _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) return accumulator class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = key def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return self.dataset[i][self.key] class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = keya _lowerCAmelCase = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
664
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"], "tokenization_roberta": ["RobertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["RobertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaForCausalLM", "RobertaForMaskedLM", "RobertaForMultipleChoice", "RobertaForQuestionAnswering", "RobertaForSequenceClassification", "RobertaForTokenClassification", "RobertaModel", "RobertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaForCausalLM", "TFRobertaForMaskedLM", "TFRobertaForMultipleChoice", "TFRobertaForQuestionAnswering", "TFRobertaForSequenceClassification", "TFRobertaForTokenClassification", "TFRobertaMainLayer", "TFRobertaModel", "TFRobertaPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "FlaxRobertaForCausalLM", "FlaxRobertaForMaskedLM", "FlaxRobertaForMultipleChoice", "FlaxRobertaForQuestionAnswering", "FlaxRobertaForSequenceClassification", "FlaxRobertaForTokenClassification", "FlaxRobertaModel", "FlaxRobertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
import numpy class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _lowerCAmelCase = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _lowerCAmelCase = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _lowerCAmelCase = numpy.random.rand(3 , 1 ) # Real output values provided. _lowerCAmelCase = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _lowerCAmelCase = numpy.zeros(output_array.shape ) def __lowerCAmelCase ( self ): _lowerCAmelCase = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def __lowerCAmelCase ( self ): _lowerCAmelCase = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _lowerCAmelCase = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _lowerCAmelCase = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): for iteration in range(1 , iterations + 1 ): _lowerCAmelCase = self.feedforward() self.back_propagation() if give_loss: _lowerCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def __lowerCAmelCase ( self , _lowerCAmelCase ): 
_lowerCAmelCase = input_arr _lowerCAmelCase = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray: return 1 / (1 + numpy.exp(-value )) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray: return (value) * (1 - (value)) def UpperCAmelCase__ ( )->int: _lowerCAmelCase = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _lowerCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _lowerCAmelCase = TwoHiddenLayerNeuralNetwork( input_array=_SCREAMING_SNAKE_CASE , output_array=_SCREAMING_SNAKE_CASE ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_SCREAMING_SNAKE_CASE , iterations=1_0 , give_loss=_SCREAMING_SNAKE_CASE ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
664
1
import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right UpperCAmelCase_ = 5_0_0_0_3 UpperCAmelCase_ = 5_0_0_0_2 @require_sentencepiece @require_tokenizers class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = PLBartTokenizer SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = False def __lowerCAmelCase ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase = PLBartTokenizer(_lowerCAmelCase , language_codes='''base''' , keep_accents=_lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self ): _lowerCAmelCase = PLBartTokenizer(_lowerCAmelCase , language_codes='''base''' , keep_accents=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _lowerCAmelCase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) _lowerCAmelCase = tokenizer.vocab_size _lowerCAmelCase = [tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) for x in range(end - 4 , _lowerCAmelCase )] self.assertListEqual(_lowerCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] ) _lowerCAmelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go''' _lowerCAmelCase = tokenizer(_lowerCAmelCase ).input_ids self.assertEqual( tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) , _lowerCAmelCase , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = PLBartTokenizer(_lowerCAmelCase , language_codes='''multi''' , keep_accents=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) 
self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _lowerCAmelCase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) _lowerCAmelCase = tokenizer.vocab_size _lowerCAmelCase = [tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) for x in range(end - 7 , _lowerCAmelCase )] self.assertListEqual( _lowerCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] ) _lowerCAmelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go''' _lowerCAmelCase = tokenizer(_lowerCAmelCase ).input_ids self.assertEqual( tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) , _lowerCAmelCase , ) @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): SCREAMING_SNAKE_CASE__ = '''uclanlp/plbart-python-en_XX''' SCREAMING_SNAKE_CASE__ = [ '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''', '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''', ] SCREAMING_SNAKE_CASE__ = [ '''Returns the maximum value of a b c.''', '''Sums the values of a b c.''', ] SCREAMING_SNAKE_CASE__ = [ 1_3_4, 5_4_5_2, 3_3_4_6_0, 3_3_4_4_1, 3_3_4_6_3, 3_3_4_6_5, 3_3_4_6_3, 3_3_4_4_9, 9_8_8, 2_0, 3_3_4_5_6, 1_9, 3_3_4_5_6, 7_7_1, 3_9, 4_2_5_8, 8_8_9, 3_3_1_8, 3_3_4_4_1, 3_3_4_6_3, 3_3_4_6_5, 3_3_4_6_3, 3_3_4_4_9, 2_4_7_1, 2, PYTHON_CODE, ] @classmethod def __lowerCAmelCase ( cls ): _lowerCAmelCase = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' ) _lowerCAmelCase = 1 return cls def __lowerCAmelCase ( self ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase ) def __lowerCAmelCase ( self ): self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids ) _lowerCAmelCase = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2] 
_lowerCAmelCase = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20] self.assertIsInstance(src_text[0] , _lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.tokenizer(_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , _lowerCAmelCase ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) def __lowerCAmelCase ( self ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] ) def __lowerCAmelCase ( self ): _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = PLBartTokenizer.from_pretrained(_lowerCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCAmelCase ) @require_torch def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , return_tensors='''pt''' ) _lowerCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , _lowerCAmelCase ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _lowerCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) _lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors='''pt''' ) _lowerCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=10 , return_tensors='''pt''' ) _lowerCAmelCase = targets['''input_ids'''] _lowerCAmelCase = shift_tokens_right(_lowerCAmelCase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , { # A, test, EOS, en_XX '''input_ids''': [[150, 242, 2, 50_003]], 
'''attention_mask''': [[1, 1, 1, 1]], # java '''forced_bos_token_id''': 50_001, } , )
664
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class UpperCAmelCase ( snake_case_ ):
    SCREAMING_SNAKE_CASE__ = '''Salesforce/blip-image-captioning-base'''
    SCREAMING_SNAKE_CASE__ = (
        '''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
        '''image to caption, and returns a text that contains the description in English.'''
    )
    SCREAMING_SNAKE_CASE__ = '''image_captioner'''
    SCREAMING_SNAKE_CASE__ = AutoModelForVisionaSeq
    SCREAMING_SNAKE_CASE__ = ['''image''']
    SCREAMING_SNAKE_CASE__ = ['''text''']

    def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
        requires_backends(self , ['''vision'''] )
        super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )

    def __lowerCAmelCase ( self , _lowerCAmelCase ):
        return self.pre_processor(images=_lowerCAmelCase , return_tensors='''pt''' )

    def __lowerCAmelCase ( self , _lowerCAmelCase ):
        return self.model.generate(**_lowerCAmelCase )

    def __lowerCAmelCase ( self , _lowerCAmelCase ):
        return self.pre_processor.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )[0].strip()
664
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Tuple )->List[Any]: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): _lowerCAmelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Exception )->bool: _lowerCAmelCase = [ '''CUDA out of memory.''', # CUDA OOM '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM ] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : callable = None , _SCREAMING_SNAKE_CASE : int = 1_2_8 )->Optional[int]: if function is None: return functools.partial(_SCREAMING_SNAKE_CASE , starting_batch_size=_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = starting_batch_size def decorator(*_SCREAMING_SNAKE_CASE : Optional[int] , **_SCREAMING_SNAKE_CASE : Optional[Any] ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _lowerCAmelCase = list(inspect.signature(_SCREAMING_SNAKE_CASE ).parameters.keys() ) # Guard against user error if len(_SCREAMING_SNAKE_CASE ) < (len(_SCREAMING_SNAKE_CASE ) + 1): _lowerCAmelCase = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError('''No executable batch size found, reached zero.''' ) try: return function(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) except Exception as e: if should_reduce_batch_size(_SCREAMING_SNAKE_CASE ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
664
1
import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): _lowerCAmelCase = '''laion/clap-htsat-unfused''' _lowerCAmelCase = tempfile.mkdtemp() def __lowerCAmelCase ( self , **_lowerCAmelCase ): return RobertaTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase ) def __lowerCAmelCase ( self , **_lowerCAmelCase ): return ClapFeatureExtractor.from_pretrained(self.checkpoint , **_lowerCAmelCase ) def __lowerCAmelCase ( self ): shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCAmelCase ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _lowerCAmelCase = self.get_feature_extractor(do_normalize=_lowerCAmelCase , padding_value=1.0 ) _lowerCAmelCase = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCAmelCase ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ) _lowerCAmelCase = floats_list((3, 1_000) ) _lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors='''np''' ) _lowerCAmelCase = processor(audios=_lowerCAmelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ) _lowerCAmelCase = '''This is a test string''' _lowerCAmelCase = processor(text=_lowerCAmelCase ) _lowerCAmelCase = tokenizer(_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ) _lowerCAmelCase = [[1, 4, 5, 8, 
1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCAmelCase = processor.batch_decode(_lowerCAmelCase ) _lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
664
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope def __lowerCAmelCase ( self ): _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_config() _lowerCAmelCase = 300 return config def __lowerCAmelCase ( self ): ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = self.prepare_config_and_inputs() 
_lowerCAmelCase = True _lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowerCAmelCase = True _lowerCAmelCase = MraModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , ) _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , ) _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_choices _lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = () def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def __lowerCAmelCase ( self ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) @slow def __lowerCAmelCase ( self ): for model_name in 
MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = MraModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def __lowerCAmelCase ( self ): return @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) _lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
664
1
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class UpperCAmelCase ( unittest.TestCase ,snake_case_ ):
    def __lowerCAmelCase ( self ):
        _lowerCAmelCase = load_tool('''text-to-speech''' )
        self.tool.setup()

    def __lowerCAmelCase ( self ):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        _lowerCAmelCase = self.tool('''hey''' )
        _lowerCAmelCase = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] ,
                torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) ,
            ) )

    def __lowerCAmelCase ( self ):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        _lowerCAmelCase = self.tool('''hey''' )
        _lowerCAmelCase = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] ,
                torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) ,
            ) )
664
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
664
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {} class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = '''llama''' SCREAMING_SNAKE_CASE__ = ['''past_key_values'''] def __init__( self , _lowerCAmelCase=32_000 , _lowerCAmelCase=4_096 , _lowerCAmelCase=11_008 , _lowerCAmelCase=32 , _lowerCAmelCase=32 , _lowerCAmelCase=None , _lowerCAmelCase="silu" , _lowerCAmelCase=2_048 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-6 , _lowerCAmelCase=True , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=None , **_lowerCAmelCase , ): _lowerCAmelCase = vocab_size _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = hidden_size _lowerCAmelCase = intermediate_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase = num_attention_heads _lowerCAmelCase = num_key_value_heads _lowerCAmelCase = hidden_act _lowerCAmelCase = initializer_range _lowerCAmelCase = rms_norm_eps _lowerCAmelCase = pretraining_tp _lowerCAmelCase = use_cache _lowerCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , **_lowerCAmelCase , ) def __lowerCAmelCase ( self ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _lowerCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F'''got {self.rope_scaling}''' ) _lowerCAmelCase = self.rope_scaling.get('''type''' , _lowerCAmelCase ) _lowerCAmelCase = self.rope_scaling.get('''factor''' , _lowerCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
664
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ): _lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18} _lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_frames _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = crop_size def __lowerCAmelCase ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ): _lowerCAmelCase = VivitImageProcessingTester(self ) @property def __lowerCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
664
1
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ): _lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) _lowerCAmelCase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) sd_pipe.set_scheduler('''sample_euler''' ) _lowerCAmelCase = '''A painting of a squirrel eating a burger''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = sd_pipe([prompt] , generator=_lowerCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _lowerCAmelCase = output.images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ): _lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _lowerCAmelCase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) sd_pipe.set_scheduler('''sample_euler''' ) _lowerCAmelCase = '''A painting of a squirrel eating a burger''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = sd_pipe([prompt] , generator=_lowerCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _lowerCAmelCase = output.images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def __lowerCAmelCase ( self ): _lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _lowerCAmelCase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) _lowerCAmelCase = '''A painting of a squirrel eating a burger''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = sd_pipe( [prompt] , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=_lowerCAmelCase , ) _lowerCAmelCase = output.images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array( [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
664
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n" UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]: def remove_articles(_SCREAMING_SNAKE_CASE : List[str] ): _lowerCAmelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(_SCREAMING_SNAKE_CASE , ''' ''' , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ): _lowerCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE : Optional[int] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Any: return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )->int: _lowerCAmelCase = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 1_0_0 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , 
_SCREAMING_SNAKE_CASE : List[str] )->Optional[int]: _lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams] _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter() for sgram, scount in sgramcounter.items(): _lowerCAmelCase = scount * numref _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter() for cgram, ccount in cgramcounter.items(): _lowerCAmelCase = ccount * numref # KEEP _lowerCAmelCase = sgramcounter_rep & cgramcounter_rep _lowerCAmelCase = keepgramcounter_rep & rgramcounter _lowerCAmelCase = sgramcounter_rep & rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = keeptmpscorea / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _lowerCAmelCase = 0 if keepscore_precision > 0 or keepscore_recall > 0: _lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _lowerCAmelCase = sgramcounter_rep - cgramcounter_rep _lowerCAmelCase = delgramcounter_rep - rgramcounter _lowerCAmelCase = sgramcounter_rep - rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = deltmpscorea / len(_SCREAMING_SNAKE_CASE ) # ADDITION _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 if addscore_precision > 0 or addscore_recall > 0: _lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str )->List[Any]: _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = ssent.split(''' ''' ) _lowerCAmelCase = csent.split(''' ''' ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] for rsent in rsents: _lowerCAmelCase = rsent.split(''' ''' ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(_SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = sum([keepascore, keepascore, 
keepascore, keepascore] ) / 4 _lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4 _lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4 _lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : str = "13a" , _SCREAMING_SNAKE_CASE : bool = True )->int: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: _lowerCAmelCase = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE ) elif tokenizer == "moses": _lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE ) elif tokenizer == "penn": _lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = sentence if not return_str: _lowerCAmelCase = normalized_sent.split() return normalized_sent def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->str: if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _lowerCAmelCase = 0 for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] ) _lowerCAmelCase = sari_score / len(_SCREAMING_SNAKE_CASE ) return 1_0_0 * sari_score def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]="exp" , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=False , )->str: _lowerCAmelCase = len(references[0] ) if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _lowerCAmelCase = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )] _lowerCAmelCase = sacrebleu.corpus_bleu( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __lowerCAmelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 
'''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = {} result.update({'''sari''': compute_sari(sources=_lowerCAmelCase , predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) result.update({'''exact''': compute_em(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) return result
664
1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys


UpperCAmelCase_ = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCAmelCase_ = (
    subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("utf-8").split()
)
UpperCAmelCase_ = "|".join(sys.argv[1:])
UpperCAmelCase_ = re.compile(RF"""^({joined_dirs}).*?\.py$""")
UpperCAmelCase_ = [x for x in modified_files if regex.match(x)]

print(" ".join(relevant_modified_files), end="")
664
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["DeiTFeatureExtractor"] UpperCAmelCase_ = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
1
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class UpperCAmelCase ( snake_case_ ): def __lowerCAmelCase ( self ): _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = 5 # Realm tok _lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _lowerCAmelCase = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) _lowerCAmelCase = os.path.join(_lowerCAmelCase , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _lowerCAmelCase = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) def __lowerCAmelCase ( self ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def __lowerCAmelCase ( self ): shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ): _lowerCAmelCase = RealmConfig(num_block_records=self.num_block_records ) return config def __lowerCAmelCase ( self ): _lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def __lowerCAmelCase ( self ): _lowerCAmelCase = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=_lowerCAmelCase , ) return block_records def __lowerCAmelCase ( self ): _lowerCAmelCase = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_config() _lowerCAmelCase = self.get_dummy_retriever() _lowerCAmelCase = retriever.tokenizer _lowerCAmelCase = np.array([0, 3] , dtype='''long''' ) _lowerCAmelCase = tokenizer(['''Test question'''] ).input_ids _lowerCAmelCase = tokenizer( ['''the fourth'''] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids _lowerCAmelCase = config.reader_seq_len _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = retriever( _lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='''np''' ) self.assertEqual(len(_lowerCAmelCase ) , 2 ) self.assertEqual(len(_lowerCAmelCase ) , 2 ) self.assertEqual(len(_lowerCAmelCase ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( 
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_config() _lowerCAmelCase = self.get_dummy_retriever() _lowerCAmelCase = retriever.tokenizer _lowerCAmelCase = np.array([0, 3, 5] , dtype='''long''' ) _lowerCAmelCase = tokenizer(['''Test question'''] ).input_ids _lowerCAmelCase = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids _lowerCAmelCase = config.reader_seq_len _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = retriever( _lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='''np''' ) self.assertEqual([False, True, True] , _lowerCAmelCase ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowerCAmelCase ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path _lowerCAmelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: _lowerCAmelCase = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME ) _lowerCAmelCase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
664
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] )->Any:  # noqa: E741
    _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
    _lowerCAmelCase = 0
    _lowerCAmelCase = [0] * n
    _lowerCAmelCase = [False] * n
    _lowerCAmelCase = [False] * n

    def dfs(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ):
        if parent == root:
            out_edge_count += 1
        _lowerCAmelCase = True
        _lowerCAmelCase = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                _lowerCAmelCase = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                _lowerCAmelCase = min(low[at] , low[to] )

                # AP found via bridge
                if at < low[to]:
                    _lowerCAmelCase = True
                # AP found via cycle
                if at == low[to]:
                    _lowerCAmelCase = True
            else:
                _lowerCAmelCase = min(low[at] , _SCREAMING_SNAKE_CASE )
        return out_edge_count

    for i in range(_SCREAMING_SNAKE_CASE ):
        if not visited[i]:
            _lowerCAmelCase = 0
            _lowerCAmelCase = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , -1 , _SCREAMING_SNAKE_CASE )
            _lowerCAmelCase = out_edge_count > 1

    for x in range(len(_SCREAMING_SNAKE_CASE ) ):
        if is_art[x] is True:
            print(_SCREAMING_SNAKE_CASE )


# Adjacency list of graph
UpperCAmelCase_ = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
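The sample above is DFS-based articulation-point detection; because the corpus renames every local to _lowerCAmelCase, the low-link rule is easier to see in a minimal sketch with readable names. The sketch below is illustrative only (the function name, type hints, and the combined bridge/cycle test are not part of the sample) and mirrors the sample's use of vertex ids in place of discovery times.

def articulation_points(graph: dict[int, list[int]]) -> list[int]:
    # low[v] tracks the smallest vertex id reachable from v's DFS subtree
    # through at most one back edge, as in the sample above.
    n = len(graph)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root: int, at: int, parent: int, out_edge_count: int) -> int:
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                if at <= low[to]:  # bridge (at < low[to]) or cycle head (at == low[to])
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            children = dfs(i, i, -1, 0)
            is_art[i] = children > 1  # a DFS root is an AP only with more than one tree child
    return [v for v in range(n) if is_art[v]]


# On the adjacency list used by the sample this yields [2, 3, 5].
print(articulation_points({0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}))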
664
1
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    UpperCAmelCase_ = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    UpperCAmelCase_ = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->Tuple:
    _lowerCAmelCase = (images / 2 + 0.5).clamp(0 , 1 )
    _lowerCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    _lowerCAmelCase = numpy_to_pil(_SCREAMING_SNAKE_CASE )
    return images


def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->List[Any]:
    if images.ndim == 3:
        _lowerCAmelCase = images[None, ...]
    _lowerCAmelCase = (images * 2_5_5).round().astype('''uint8''' )
    if images.shape[-1] == 1:  # special case for grayscale (single channel) images
        _lowerCAmelCase = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
    else:
        _lowerCAmelCase = [Image.fromarray(_SCREAMING_SNAKE_CASE ) for image in images]
    return pil_images
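The two helpers above are diffusers-style post-processing: map a tensor batch from [-1, 1] to [0, 1], move channels last, and wrap each array as a PIL image. A minimal usage sketch of the same NHWC-uint8 conversion, assuming only numpy and Pillow are available (illustrative, not part of the sample):

import numpy as np
from PIL import Image

batch = np.random.rand(2, 64, 64, 3)            # NHWC floats in [0, 1]
batch = (batch * 255).round().astype("uint8")   # same scaling as the helper above
pil_images = [Image.fromarray(img) for img in batch]
print(len(pil_images), pil_images[0].size)      # 2 (64, 64)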
664
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCAmelCase ( snake_case_ ): def __lowerCAmelCase ( self ): _lowerCAmelCase = SMALL_MODEL_IDENTIFIER _lowerCAmelCase = '''pt''' _lowerCAmelCase = '''tf''' def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase ) model_tf.save_pretrained(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''mock_framework''' # Framework provided - return whatever the user provides _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch( '''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # Both not in environment -> raise error _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) _lowerCAmelCase = 
MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch( '''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
664
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { "configuration_longformer": [ "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerOnnxConfig", ], "tokenization_longformer": ["LongformerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["LongformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "LongformerForMaskedLM", "LongformerForMultipleChoice", "LongformerForQuestionAnswering", "LongformerForSequenceClassification", "LongformerForTokenClassification", "LongformerModel", "LongformerPreTrainedModel", "LongformerSelfAttention", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLongformerForMaskedLM", "TFLongformerForMultipleChoice", "TFLongformerForQuestionAnswering", "TFLongformerForSequenceClassification", "TFLongformerForTokenClassification", "TFLongformerModel", "TFLongformerPreTrainedModel", "TFLongformerSelfAttention", ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = DiTPipeline SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = False def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith('''mps''' ): _lowerCAmelCase = torch.manual_seed(_lowerCAmelCase ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) _lowerCAmelCase = { '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ): _lowerCAmelCase = '''cpu''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase ) _lowerCAmelCase = pipe(**_lowerCAmelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase , 1E-3 ) def __lowerCAmelCase ( self ): self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = 
pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCAmelCase ( self ): _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' F'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-1
664
1
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json UpperCAmelCase_ = "sshleifer/mar_enro_6_3_student" class UpperCAmelCase ( snake_case_ ): def __lowerCAmelCase ( self ): super().setUp() _lowerCAmelCase = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_lowerCAmelCase , ) _lowerCAmelCase = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k''' @slow @require_torch_gpu def __lowerCAmelCase ( self ): MarianMTModel.from_pretrained(_lowerCAmelCase ) @slow @require_torch_gpu def __lowerCAmelCase ( self ): _lowerCAmelCase = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script _lowerCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() _lowerCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): _lowerCAmelCase = bash_script.replace(_lowerCAmelCase , str(_lowerCAmelCase ) ) _lowerCAmelCase = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") _lowerCAmelCase = F''' --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 '''.split() # XXX: args.gpus > 1 : handle multi_gpu in the future _lowerCAmelCase = ['''finetune.py'''] + bash_script.split() + args with patch.object(_lowerCAmelCase , '''argv''' , _lowerCAmelCase ): _lowerCAmelCase = argparse.ArgumentParser() _lowerCAmelCase = pl.Trainer.add_argparse_args(_lowerCAmelCase ) _lowerCAmelCase = SummarizationModule.add_model_specific_args(_lowerCAmelCase , os.getcwd() ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = main(_lowerCAmelCase ) # Check metrics _lowerCAmelCase = load_json(model.metrics_save_path ) _lowerCAmelCase = metrics['''val'''][0] _lowerCAmelCase = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowerCAmelCase ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict _lowerCAmelCase = os.listdir(_lowerCAmelCase ) _lowerCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] _lowerCAmelCase = os.path.join(args.output_dir , _lowerCAmelCase ) _lowerCAmelCase = torch.load(_lowerCAmelCase , map_location='''cpu''' ) _lowerCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: _lowerCAmelCase = {os.path.basename(_lowerCAmelCase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class UpperCAmelCase ( snake_case_ ): @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def __lowerCAmelCase ( self ): _lowerCAmelCase = F'''{self.test_file_dir_str}/test_data/wmt_en_ro''' _lowerCAmelCase = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 128, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script _lowerCAmelCase = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) _lowerCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) _lowerCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): _lowerCAmelCase = bash_script.replace(_lowerCAmelCase , str(_lowerCAmelCase ) ) _lowerCAmelCase = self.get_auto_remove_tmp_dir() _lowerCAmelCase = bash_script.replace('''--fp16''' , '''''' ) _lowerCAmelCase = 6 _lowerCAmelCase = ( ['''distillation.py'''] + bash_script.split() + [ F'''--output_dir={output_dir}''', '''--gpus=1''', '''--learning_rate=1e-3''', F'''--num_train_epochs={epochs}''', '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(_lowerCAmelCase , '''argv''' , _lowerCAmelCase ): _lowerCAmelCase = argparse.ArgumentParser() _lowerCAmelCase = pl.Trainer.add_argparse_args(_lowerCAmelCase ) _lowerCAmelCase = SummarizationDistiller.add_model_specific_args(_lowerCAmelCase , os.getcwd() ) _lowerCAmelCase = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu _lowerCAmelCase = distill_main(_lowerCAmelCase ) # Check metrics _lowerCAmelCase = load_json(model.metrics_save_path ) _lowerCAmelCase = metrics['''val'''][0] _lowerCAmelCase = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowerCAmelCase ) # check lightning ckpt can be loaded and has a reasonable statedict _lowerCAmelCase = os.listdir(_lowerCAmelCase ) _lowerCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] _lowerCAmelCase = os.path.join(args.output_dir , _lowerCAmelCase ) _lowerCAmelCase = torch.load(_lowerCAmelCase , map_location='''cpu''' ) _lowerCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: _lowerCAmelCase = {os.path.basename(_lowerCAmelCase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
664
from __future__ import annotations

import json

import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent

UpperCAmelCase_ = {"UserAgent": UserAgent().random}


def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->dict:
    _lowerCAmelCase = script.contents[0]
    _lowerCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class UpperCAmelCase :
    def __init__( self , _lowerCAmelCase ):
        _lowerCAmelCase = F'''https://www.instagram.com/{username}/'''
        _lowerCAmelCase = self.get_json()

    def __lowerCAmelCase ( self ):
        _lowerCAmelCase = requests.get(self.url , headers=_lowerCAmelCase ).text
        _lowerCAmelCase = BeautifulSoup(_lowerCAmelCase , '''html.parser''' ).find_all('''script''' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__( self ):
        return F'''{self.__class__.__name__}(\'{self.username}\')'''

    def __str__( self ):
        return F'''{self.fullname} ({self.username}) is {self.biography}'''

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["username"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["full_name"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["biography"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["business_email"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["external_url"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["edge_follow"]["count"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["profile_pic_url_hd"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["is_verified"]

    @property
    def __lowerCAmelCase ( self ):
        return self.user_data["is_private"]


def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None:
    import os

    if os.environ.get('''CI''' ):
        return  # test failing on GitHub Actions
    _lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 1_5_0
    assert instagram_user.number_of_followers > 1_2_0_0_0_0
    assert instagram_user.number_of_followings > 1_5
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    UpperCAmelCase_ = InstagramUser("github")
    print(instagram_user)
    print(F"""{instagram_user.number_of_posts = }""")
    print(F"""{instagram_user.number_of_followers = }""")
    print(F"""{instagram_user.number_of_followings = }""")
    print(F"""{instagram_user.email = }""")
    print(F"""{instagram_user.website = }""")
    print(F"""{instagram_user.profile_picture_url = }""")
    print(F"""{instagram_user.is_verified = }""")
    print(F"""{instagram_user.is_private = }""")
664
1
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=3 , _lowerCAmelCase=32 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=[10, 20, 30, 40] , _lowerCAmelCase=[1, 1, 2, 1] , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=3 , _lowerCAmelCase=None , ): _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = num_channels _lowerCAmelCase = embeddings_size _lowerCAmelCase = hidden_sizes _lowerCAmelCase = depths _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = hidden_act _lowerCAmelCase = num_labels _lowerCAmelCase = scope _lowerCAmelCase = len(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = self.get_config() return config, pixel_values def __lowerCAmelCase ( self ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = FlaxRegNetModel(config=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = FlaxRegNetForImageClassification(config=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def __lowerCAmelCase ( self ): _lowerCAmelCase = FlaxRegNetModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase ) def __lowerCAmelCase ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def __lowerCAmelCase ( self ): return def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __lowerCAmelCase ( self ): pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __lowerCAmelCase ( self ): pass def __lowerCAmelCase ( self ): _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(_lowerCAmelCase ) _lowerCAmelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def __lowerCAmelCase ( self ): def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = model_class(_lowerCAmelCase ) _lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) _lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCAmelCase = self.model_tester.num_stages self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 ) _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = model_class(_lowerCAmelCase ) @jax.jit def model_jitted(_lowerCAmelCase , **_lowerCAmelCase ): return model(pixel_values=_lowerCAmelCase , **_lowerCAmelCase ) with self.subTest('''JIT Enabled''' ): _lowerCAmelCase = model_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): _lowerCAmelCase = model_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase__ ( )->Optional[Any]: _lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class UpperCAmelCase ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ): return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=_lowerCAmelCase , 
return_tensors='''np''' ) _lowerCAmelCase = model(**_lowerCAmelCase ) # verify the logits _lowerCAmelCase = (1, 1_000) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) _lowerCAmelCase = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
664
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : str )->list[int]:
    _lowerCAmelCase = int(_SCREAMING_SNAKE_CASE )

    # Initialize Result
    _lowerCAmelCase = []

    # Traverse through all denomination
    for denomination in reversed(_SCREAMING_SNAKE_CASE ):
        # Find denominations
        while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ):
            total_value -= int(_SCREAMING_SNAKE_CASE )
            answer.append(_SCREAMING_SNAKE_CASE )  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    UpperCAmelCase_ = []
    UpperCAmelCase_ = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        UpperCAmelCase_ = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        UpperCAmelCase_ = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        UpperCAmelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
        UpperCAmelCase_ = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(F"""Following is minimal change for {value}: """)
        UpperCAmelCase_ = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
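The routine above is greedy change-making: repeatedly take the largest denomination that still fits. Greedy is optimal for canonical coin systems such as the Indian denominations hard-coded above, but not for arbitrary sets. A small counterexample sketch with readable names (illustrative, not part of the sample):

def greedy_change(denominations: list[int], total: int) -> list[int]:
    # Same strategy as the sample: largest coin first, as many times as it fits.
    answer = []
    for coin in sorted(denominations, reverse=True):
        while total >= coin:
            total -= coin
            answer.append(coin)
    return answer


print(greedy_change([1, 3, 4], 6))  # [4, 1, 1] -- three coins, while the optimum is [3, 3]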
664
1
import torch
from transformers import AutoModel


class UpperCAmelCase ( torch.nn.Module ):
    def __init__( self , _lowerCAmelCase="sayef/fsner-bert-base-uncased" ):
        super(_lowerCAmelCase , self ).__init__()
        _lowerCAmelCase = AutoModel.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
        _lowerCAmelCase = torch.nn.CosineSimilarity(3 , 1E-08 )
        _lowerCAmelCase = torch.nn.Softmax(dim=1 )

    def __lowerCAmelCase ( self , **_lowerCAmelCase ):
        return self.bert(**_lowerCAmelCase ).last_hidden_state

    def __lowerCAmelCase ( self , _lowerCAmelCase ):
        return token_embeddings.sum(2 , keepdim=_lowerCAmelCase )

    def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1 ):
        return self.softmax(T * self.cos(_lowerCAmelCase , _lowerCAmelCase ) )

    def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
        _lowerCAmelCase = W_supports['''sizes'''].tolist()
        _lowerCAmelCase = W_supports['''start_token_id'''].item()
        _lowerCAmelCase = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        _lowerCAmelCase = self.BERT(**_lowerCAmelCase )
        _lowerCAmelCase = self.BERT(**_lowerCAmelCase )
        _lowerCAmelCase = None
        _lowerCAmelCase = None
        _lowerCAmelCase = W_supports['''input_ids'''] == start_token_id
        _lowerCAmelCase = W_supports['''input_ids'''] == end_token_id

        for i, size in enumerate(_lowerCAmelCase ):
            if i == 0:
                _lowerCAmelCase = 0
            else:
                _lowerCAmelCase = support_sizes[i - 1]
            _lowerCAmelCase = S[s : s + size][start_token_masks[s : s + size]]
            _lowerCAmelCase = S[s : s + size][end_token_masks[s : s + size]]
            _lowerCAmelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            _lowerCAmelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                _lowerCAmelCase = torch.vstack((p_starts, p_start) )
                _lowerCAmelCase = torch.vstack((p_ends, p_end) )
            else:
                _lowerCAmelCase = p_start
                _lowerCAmelCase = p_end

        return p_starts, p_ends
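In the forward pass above, the query and support batches are encoded with BERT, and for each query example every query token is scored against the support set's entity start (and end) marker embeddings, then normalized over query positions. Read as a formula, this is one interpretation of the matmul/sum/softmax chain above (with t indexing query tokens and j indexing support start/end markers), not an official FSNER equation:

p^{\text{start}}_t = \operatorname*{softmax}_{t}\Big(\sum_{j} q_t \cdot s^{\text{start}}_j\Big),
\qquad
p^{\text{end}}_t = \operatorname*{softmax}_{t}\Big(\sum_{j} q_t \cdot s^{\text{end}}_j\Big)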
664
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict: # Initialise PyTorch model _lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) _lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
664
1
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = "Hello world! cécé herlolip" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]: _lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout _lowerCAmelCase = roberta.model.encoder.sentence_encoder _lowerCAmelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings _lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight _lowerCAmelCase = roberta_sent_encoder.embed_positions.weight _lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight _lowerCAmelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _lowerCAmelCase = model.roberta.encoder.layer[i] _lowerCAmelCase = roberta_sent_encoder.layers[i] _lowerCAmelCase = layer.attention _lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight _lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias # self attention _lowerCAmelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _lowerCAmelCase = roberta_layer.self_attn.q_proj.weight _lowerCAmelCase = roberta_layer.self_attn.q_proj.bias _lowerCAmelCase = roberta_layer.self_attn.k_proj.weight _lowerCAmelCase = roberta_layer.self_attn.k_proj.bias _lowerCAmelCase = roberta_layer.self_attn.v_proj.weight _lowerCAmelCase = roberta_layer.self_attn.v_proj.bias # self-attention output _lowerCAmelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _lowerCAmelCase = roberta_layer.self_attn.out_proj.weight _lowerCAmelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _lowerCAmelCase = roberta_layer.final_layer_norm.weight _lowerCAmelCase = roberta_layer.final_layer_norm.bias # intermediate _lowerCAmelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # output _lowerCAmelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # end of layer if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head _lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight _lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias _lowerCAmelCase = roberta.model.encoder.lm_head.weight _lowerCAmelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 _lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0] if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) ) else: _lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) _lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 _lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) UpperCAmelCase_ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
664
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = "Hello world! cécé herlolip" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]: _lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout _lowerCAmelCase = roberta.model.encoder.sentence_encoder _lowerCAmelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings _lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight _lowerCAmelCase = roberta_sent_encoder.embed_positions.weight _lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight _lowerCAmelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _lowerCAmelCase = model.roberta.encoder.layer[i] _lowerCAmelCase = roberta_sent_encoder.layers[i] _lowerCAmelCase = layer.attention _lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight _lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias # self attention _lowerCAmelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _lowerCAmelCase = roberta_layer.self_attn.q_proj.weight _lowerCAmelCase = roberta_layer.self_attn.q_proj.bias _lowerCAmelCase = roberta_layer.self_attn.k_proj.weight _lowerCAmelCase = roberta_layer.self_attn.k_proj.bias _lowerCAmelCase = roberta_layer.self_attn.v_proj.weight _lowerCAmelCase = roberta_layer.self_attn.v_proj.bias # self-attention output _lowerCAmelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _lowerCAmelCase = roberta_layer.self_attn.out_proj.weight _lowerCAmelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _lowerCAmelCase = roberta_layer.final_layer_norm.weight _lowerCAmelCase = roberta_layer.final_layer_norm.bias # intermediate _lowerCAmelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # output _lowerCAmelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # end of layer if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head _lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight _lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias _lowerCAmelCase = roberta.model.encoder.lm_head.weight _lowerCAmelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 _lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0] if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) ) else: _lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) _lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 _lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) UpperCAmelCase_ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
664
1
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

UpperCAmelCase_ = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

UpperCAmelCase_ = {
    "gpt-neox-20b": 2_0_4_8,
}


class UpperCAmelCase ( snake_case_ ):
    SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']

    def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase=False , **_lowerCAmelCase , ):
        super().__init__(
            _lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
        _lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , _lowerCAmelCase ) != add_prefix_space:
            _lowerCAmelCase = getattr(_lowerCAmelCase , pre_tok_state.pop('''type''' ) )
            _lowerCAmelCase = add_prefix_space
            _lowerCAmelCase = pre_tok_class(**_lowerCAmelCase )
        _lowerCAmelCase = add_prefix_space

    def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
        _lowerCAmelCase = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
        return tuple(_lowerCAmelCase )

    def __lowerCAmelCase ( self , _lowerCAmelCase ):
        _lowerCAmelCase = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) + [self.eos_token_id] )
        if len(_lowerCAmelCase ) > self.model_max_length:
            _lowerCAmelCase = input_ids[-self.model_max_length :]
        return input_ids
664
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _lowerCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = i / num_diffusion_timesteps _lowerCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class UpperCAmelCase ( snake_case_ ,snake_case_ ): SCREAMING_SNAKE_CASE__ = 1 @register_to_config def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ): if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None: _lowerCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase ) _lowerCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowerCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _lowerCAmelCase = 1.0 - self.betas _lowerCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _lowerCAmelCase = 1.0 # setable values _lowerCAmelCase = None _lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): return sample def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' F''' maximal {self.config.num_train_timesteps} timesteps.''' ) _lowerCAmelCase = num_inference_steps _lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa ) _lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase ) self.timesteps += self.config.steps_offset def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ): # 1. get previous step value (=t+1) _lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _lowerCAmelCase = self.alphas_cumprod[timestep] _lowerCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _lowerCAmelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _lowerCAmelCase = model_output elif self.config.prediction_type == "sample": _lowerCAmelCase = model_output _lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _lowerCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def __len__( self ): return self.config.num_train_timesteps
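The step() above is the inverted DDIM update that its comments reference (Eq. (12) of the DDIM paper, run in the noising direction with no added noise). For the epsilon prediction type, and reading the code's alphas_cumprod as the cumulative product \bar{\alpha} with "prev_timestep" corresponding to t+1, the update amounts to:

\hat{x}_0 = \frac{x_t - \sqrt{1-\bar{\alpha}_t}\,\epsilon_\theta(x_t, t)}{\sqrt{\bar{\alpha}_t}},
\qquad
x_{t+1} = \sqrt{\bar{\alpha}_{t+1}}\,\hat{x}_0 + \sqrt{1-\bar{\alpha}_{t+1}}\,\epsilon_\theta(x_t, t)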
664
1
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Union[str, Any]: _lowerCAmelCase = os.path.join(args.tf_model_dir , '''parameters.json''' ) _lowerCAmelCase = json.loads(open(_SCREAMING_SNAKE_CASE ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): _lowerCAmelCase = args.output + '''.pt''' _lowerCAmelCase = OrderedDict() with tf.device('''/CPU:0''' ): _lowerCAmelCase = tf.train.load_checkpoint(args.tf_model_dir ) _lowerCAmelCase = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _lowerCAmelCase = reader.get_tensor(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): _lowerCAmelCase = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): _lowerCAmelCase = 8 _lowerCAmelCase = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _lowerCAmelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/moe''' ): _lowerCAmelCase = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player _lowerCAmelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/softmlp/kernel''' ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player _lowerCAmelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): _lowerCAmelCase = key_name[-9:-7] for i in range(1_6 ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) _lowerCAmelCase = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/mlp''' ): _lowerCAmelCase = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player _lowerCAmelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p1/bias''' ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player _lowerCAmelCase = vnp.copy() # same because it is one dimensional _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/kernel''' ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player _lowerCAmelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/bias''' ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player _lowerCAmelCase = vnp.copy() # same because it is one dimensional _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif 
key_name.startswith('''model/ln''' ): _lowerCAmelCase = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.norm.bias''' % player _lowerCAmelCase = vnp.copy() # same because it is one dimensional _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): _lowerCAmelCase = '''model.blocks.%d.feed_forward.norm.weight''' % player _lowerCAmelCase = vnp.copy() # same because it is one dimensional _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/att''' ): _lowerCAmelCase = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): _lowerCAmelCase = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _lowerCAmelCase = state[:, 0, :, :] _lowerCAmelCase = state[:, 1, :, :] _lowerCAmelCase = state[:, 2, :, :] _lowerCAmelCase = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/o/kernel''' ): _lowerCAmelCase = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player _lowerCAmelCase = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/an''' ): _lowerCAmelCase = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): _lowerCAmelCase = '''model.blocks.%d.self_attn.norm.bias''' % player _lowerCAmelCase = vnp.copy() # same because it is one dimensional _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): _lowerCAmelCase = '''model.blocks.%d.self_attn.norm.weight''' % player _lowerCAmelCase = vnp.copy() # same because it is one dimensional _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): _lowerCAmelCase = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] _lowerCAmelCase = '''model.%s.weight''' % nlayer _lowerCAmelCase = vnp.copy() # same in embedded _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) if key_name.startswith('''model/wte''' ): _lowerCAmelCase = '''lm_head.weight''' _lowerCAmelCase = vnp.copy() # same in embedded _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/wob''' ): _lowerCAmelCase = '''final_logits_bias''' _lowerCAmelCase = vnp.copy() # same in embedded _lowerCAmelCase = state.reshape((1, -1) ) _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name == "model/dense/kernel": 
_lowerCAmelCase = '''model.last_project.weight''' _lowerCAmelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) elif key_name == "model/dense_1/bias": _lowerCAmelCase = '''model.last_project.bias''' _lowerCAmelCase = vnp.copy() # same because it is one dimensional _lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , args.output ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") UpperCAmelCase_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
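# Hypothetical command-line invocation of the converter above; the script name and
# both paths are placeholders, not taken from this file. The script reads
# parameters.json and the TF checkpoint from --tf_model_dir, remaps each
# Mesh-TensorFlow variable to its PyTorch key, and saves the state dict to
# --output (a ".pt" suffix is appended if it is missing):
#
#   python convert_tf_checkpoint.py --tf_model_dir /path/to/tf_checkpoint --output /path/to/model.pt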
664
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase_ = { "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"], "tokenization_cpmant": ["CpmAntTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", "CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
1
from __future__ import annotations import queue class UpperCAmelCase : def __init__( self , _lowerCAmelCase ): _lowerCAmelCase = data _lowerCAmelCase = None _lowerCAmelCase = None def UpperCAmelCase__ ( )->TreeNode: print('''\n********Press N to stop entering at any point of time********\n''' ) _lowerCAmelCase = input('''Enter the value of the root node: ''' ).strip().lower() _lowerCAmelCase = queue.Queue() _lowerCAmelCase = TreeNode(int(_SCREAMING_SNAKE_CASE ) ) q.put(_SCREAMING_SNAKE_CASE ) while not q.empty(): _lowerCAmelCase = q.get() _lowerCAmelCase = f'''Enter the left node of {node_found.data}: ''' _lowerCAmelCase = input(_SCREAMING_SNAKE_CASE ).strip().lower() or '''n''' if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(_SCREAMING_SNAKE_CASE ) ) _lowerCAmelCase = left_node q.put(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = f'''Enter the right node of {node_found.data}: ''' _lowerCAmelCase = input(_SCREAMING_SNAKE_CASE ).strip().lower() or '''n''' if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(_SCREAMING_SNAKE_CASE ) ) _lowerCAmelCase = right_node q.put(_SCREAMING_SNAKE_CASE ) raise def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TreeNode )->None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TreeNode )->None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TreeNode )->None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TreeNode )->None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node: return _lowerCAmelCase = queue.Queue() q.put(_SCREAMING_SNAKE_CASE ) while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TreeNode )->None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node: return _lowerCAmelCase = queue.Queue() q.put(_SCREAMING_SNAKE_CASE ) while not q.empty(): _lowerCAmelCase = [] while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TreeNode )->None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = n.left # end of while means current node doesn't have left child _lowerCAmelCase = stack.pop() # start to traverse its right child _lowerCAmelCase = n.right def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TreeNode )->None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: stack.append(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = n.left _lowerCAmelCase = 
stack.pop() print(n.data , end=''',''' ) _lowerCAmelCase = n.right def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : TreeNode )->None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node: return _lowerCAmelCase , _lowerCAmelCase = [], [] _lowerCAmelCase = node stacka.append(_SCREAMING_SNAKE_CASE ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(_SCREAMING_SNAKE_CASE ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "" , _SCREAMING_SNAKE_CASE : Optional[Any]=5_0 , _SCREAMING_SNAKE_CASE : Optional[int]="*" )->str: if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase = divmod(width - len(_SCREAMING_SNAKE_CASE ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) UpperCAmelCase_ = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
664
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = '''ClapFeatureExtractor''' SCREAMING_SNAKE_CASE__ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: _lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if audios is not None: _lowerCAmelCase = self.feature_extractor( _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None and audios is not None: _lowerCAmelCase = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase ) def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer.model_input_names _lowerCAmelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
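# Self-contained illustration (not from this file) of the merging behaviour of
# __call__ above: when both text and audios are given, the audio features are
# attached to the text encoding under "input_features". Plain dicts stand in for
# the real BatchEncoding / BatchFeature objects and all values are made up.
text_encoding = {"input_ids": [[0, 31414, 2]], "attention_mask": [[1, 1, 1]]}
audio_features = {"input_features": [[0.1, 0.2, 0.3]]}
text_encoding["input_features"] = audio_features["input_features"]
print(sorted(text_encoding))  # ['attention_mask', 'input_features', 'input_ids']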
664
1
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class UpperCAmelCase ( snake_case_ ): def __lowerCAmelCase ( self , _lowerCAmelCase ): return 0.0 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int )->tuple[int | float, int | float]: _lowerCAmelCase = min([-2_0, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _lowerCAmelCase = max([2_0, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int )->None: _lowerCAmelCase = 5_1_2 _lowerCAmelCase = [1] + [0] * (size - 1) _lowerCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _lowerCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _lowerCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _lowerCAmelCase = 2_0 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(2_4 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _lowerCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-8_0, bounds[0]] ) , min([8_0, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int )->None: _lowerCAmelCase = 5_1_2 _lowerCAmelCase = [1] + [0] * (size - 1) _lowerCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _lowerCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _lowerCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(2_4 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
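# Hypothetical filter used to exercise the plotting helpers above; a real IIR filter
# would keep state and apply feedback coefficients in process(). The class name and
# the 48 kHz sample rate below are assumptions made for this example only.
class PassthroughFilter:
    def process(self, sample: float) -> float:
        # identity filter: flat 0 dB magnitude response and zero phase shift
        return sample

# Driving an impulse through it and FFT-ing the output, as the helpers above do,
# produces a flat bode plot, which is the expected response for a plain wire, e.g.:
# show_frequency_response(PassthroughFilter(), 48000)  # function name assumed; opens a matplotlib window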
664
from __future__ import annotations def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list )->list: if len(_SCREAMING_SNAKE_CASE ) == 0: return [] _lowerCAmelCase , _lowerCAmelCase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = int(max_value - min_value ) + 1 _lowerCAmelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )] for i in my_list: buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE ) return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
664
1
from timeit import timeit UpperCAmelCase_ = { "MALAYALAM": True, "String": False, "rotor": True, "level": True, "A": True, "BB": True, "ABC": False, "amanaplanacanalpanama": True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->bool: _lowerCAmelCase = 0 _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->bool: _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) // 2 _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(_SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->bool: if len(_SCREAMING_SNAKE_CASE ) <= 2: return True if s[0] == s[len(_SCREAMING_SNAKE_CASE ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->bool: return s == s[::-1] def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->None: _lowerCAmelCase = f'''all({name}(key) is value for key, value in test_data.items())''' _lowerCAmelCase = f'''from __main__ import test_data, {name}''' _lowerCAmelCase = 5_0_0_0_0_0 _lowerCAmelCase = timeit(stmt=_SCREAMING_SNAKE_CASE , setup=_SCREAMING_SNAKE_CASE , number=_SCREAMING_SNAKE_CASE ) print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(F"""{key:21} {value}""") print("a man a plan a canal panama") # finished 500,000 runs in 0.46793 seconds benchmark_function("is_palindrome_slice") # finished 500,000 runs in 0.85234 seconds benchmark_function("is_palindrome") # finished 500,000 runs in 1.32028 seconds benchmark_function("is_palindrome_recursive") # finished 500,000 runs in 2.08679 seconds benchmark_function("is_palindrome_traversal")
664
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def UpperCAmelCase__ ( )->Any: _lowerCAmelCase = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: _lowerCAmelCase = get_sagemaker_input() else: _lowerCAmelCase = get_cluster_input() return config def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str: if subparsers is not None: _lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE ) parser.add_argument( '''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str: _lowerCAmelCase = get_user_input() if args.config_file is not None: _lowerCAmelCase = args.config_file else: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(_SCREAMING_SNAKE_CASE ) else: config.to_yaml_file(_SCREAMING_SNAKE_CASE ) print(f'''accelerate configuration saved at {config_file}''' ) def UpperCAmelCase__ ( )->List[Any]: _lowerCAmelCase = config_command_parser() _lowerCAmelCase = parser.parse_args() config_command(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
664
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"vocab_file": "spm_char.model"} UpperCAmelCase_ = { "vocab_file": { "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model", "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model", "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model", } } UpperCAmelCase_ = { "microsoft/speecht5_asr": 1_0_2_4, "microsoft/speecht5_tts": 1_0_2_4, "microsoft/speecht5_vc": 1_0_2_4, } class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ): _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) _lowerCAmelCase = vocab_file _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @property def __lowerCAmelCase ( self ): return self.sp_model.get_piece_size() def __lowerCAmelCase ( self ): _lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None return state def __setstate__( self , _lowerCAmelCase ): _lowerCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self , _lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase ): return self.sp_model.piece_to_id(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = self.sp_model.IdToPiece(_lowerCAmelCase ) return token def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = [] _lowerCAmelCase = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCAmelCase ) + token _lowerCAmelCase = [] else: current_sub_tokens.append(_lowerCAmelCase ) out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , 
already_has_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = [1] if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + suffix_ones return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , '''wb''' ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
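# Illustrative only (not from this file): build_inputs_with_special_tokens above
# simply appends the EOS id, and for a pair it concatenates both sequences first.
# The token ids below are made up for the example.
eos_token_id = 2
single = [5, 6, 7]
pair_a, pair_b = [5, 6], [7, 8]
print(single + [eos_token_id])           # [5, 6, 7, 2]
print(pair_a + pair_b + [eos_token_id])  # [5, 6, 7, 8, 2]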
664
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex UpperCAmelCase_ = 1_0 UpperCAmelCase_ = 2_5_6 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]: if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS: return None _lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE ) for token in set(_SCREAMING_SNAKE_CASE ): min_hash.update(token.encode() ) return min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]: return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0} class UpperCAmelCase : def __init__( self , *, _lowerCAmelCase = 0.85 , ): _lowerCAmelCase = duplication_jaccard_threshold _lowerCAmelCase = NUM_PERM _lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _lowerCAmelCase = defaultdict(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self._index.query(_lowerCAmelCase ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(_lowerCAmelCase , _lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [] for base, duplicates in self._duplicate_clusters.items(): _lowerCAmelCase = [base] + list(_lowerCAmelCase ) # reformat the cluster to be a list of dict _lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(_lowerCAmelCase ) return duplicate_clusters def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = self.get_duplicate_clusters() with open(_lowerCAmelCase , '''w''' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = element _lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str: _lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ): di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float: _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) UpperCAmelCase_ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]: _lowerCAmelCase = [] for elementa in cluster: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold: elementa["copies"] += 1 break else: _lowerCAmelCase = 1 extremes.append(_SCREAMING_SNAKE_CASE ) return extremes def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple: global _shared_dataset _lowerCAmelCase = dataset _lowerCAmelCase = [] _lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ): extremes_list.append(_SCREAMING_SNAKE_CASE ) return extremes_list def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]: _lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} _lowerCAmelCase = {} _lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for extremes in extremes_clusters: for element in extremes: _lowerCAmelCase = element _lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() ) _lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _lowerCAmelCase = element['''base_index'''] in extreme_dict if element["is_extreme"]: _lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies'''] print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) return ds_filter, duplicate_clusters
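# Small, self-contained illustration of the token-level Jaccard similarity computed
# above; the helper mirrors get_tokens / jaccard_similarity and the two strings are
# made up for this example.
import re

NON_ALPHA_EXAMPLE = re.compile("[^A-Za-z_0-9]")

def example_tokens(s):
    return {t for t in NON_ALPHA_EXAMPLE.split(s) if len(t.strip()) > 0}

left = example_tokens("def add(x, y): return x + y")
right = example_tokens("def add(a, b): return a + b")
print(len(left & right) / len(left | right))  # 3 shared tokens out of 7 -> ~0.43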
664
1
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets UpperCAmelCase_ = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n" UpperCAmelCase_ = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n" UpperCAmelCase_ = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __lowerCAmelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ] , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="auto" , _lowerCAmelCase=-1 , _lowerCAmelCase=0.9 , _lowerCAmelCase=5 , _lowerCAmelCase=500 , _lowerCAmelCase="gpt2-large" , _lowerCAmelCase=-1 , _lowerCAmelCase=1_024 , _lowerCAmelCase=25 , _lowerCAmelCase=5 , _lowerCAmelCase=True , _lowerCAmelCase=25 , ): _lowerCAmelCase = compute_mauve( p_text=_lowerCAmelCase , q_text=_lowerCAmelCase , p_features=_lowerCAmelCase , q_features=_lowerCAmelCase , p_tokens=_lowerCAmelCase , q_tokens=_lowerCAmelCase , num_buckets=_lowerCAmelCase , pca_max_data=_lowerCAmelCase , kmeans_explained_var=_lowerCAmelCase , kmeans_num_redo=_lowerCAmelCase , kmeans_max_iter=_lowerCAmelCase , featurize_model_name=_lowerCAmelCase , device_id=_lowerCAmelCase , max_text_length=_lowerCAmelCase , divergence_curve_discretization_size=_lowerCAmelCase , mauve_scaling_factor=_lowerCAmelCase , verbose=_lowerCAmelCase , seed=_lowerCAmelCase , ) return out
664
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = process _lowerCAmelCase = params def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): _lowerCAmelCase = self.dataset[i] _lowerCAmelCase = self.process(_lowerCAmelCase , **self.params ) return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): _lowerCAmelCase = loader _lowerCAmelCase = infer _lowerCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _lowerCAmelCase = None _lowerCAmelCase = loader_batch_size # Internal bookkeeping _lowerCAmelCase = None _lowerCAmelCase = None def __len__( self ): return len(self.loader ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _lowerCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _lowerCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Convert ModelOutput to tuple first _lowerCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _lowerCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_lowerCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase ) self._loader_batch_index += 1 return result def __lowerCAmelCase ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _lowerCAmelCase = next(self.iterator ) _lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _lowerCAmelCase = processed _lowerCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) _lowerCAmelCase = None return self def __lowerCAmelCase ( self ): if self.subiterator is None: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _lowerCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) _lowerCAmelCase = next(self.subiterator ) return processed class UpperCAmelCase ( snake_case_ ): def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
_lowerCAmelCase = False _lowerCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator while not is_last: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size _lowerCAmelCase = processed _lowerCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator else: _lowerCAmelCase = processed _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) return accumulator class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = key def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return self.dataset[i][self.key] class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = keya _lowerCAmelCase = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
664
1
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str )->Union[str, Any]: _lowerCAmelCase = s.rsplit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return new.join(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple )->Any: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->Optional[Any]: _lowerCAmelCase = {} _lowerCAmelCase = ['''group_1''', '''group_2''', '''group_3''', '''group_4'''] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: _lowerCAmelCase = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' ) if "res_path" in key: _lowerCAmelCase = key.replace('''res_path.''' , '''res_path.path.''' ) if key.endswith('''.w''' ): _lowerCAmelCase = rreplace(_SCREAMING_SNAKE_CASE , '''.w''' , '''.weight''' , 1 ) if key.endswith('''.b''' ): _lowerCAmelCase = rreplace(_SCREAMING_SNAKE_CASE , '''.b''' , '''.bias''' , 1 ) _lowerCAmelCase = value.float() return upgrade @torch.no_grad() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : List[str]=True )->str: from dall_e import Encoder _lowerCAmelCase = Encoder() if os.path.exists(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = torch.load(_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _lowerCAmelCase = ckpt.state_dict() encoder.load_state_dict(_SCREAMING_SNAKE_CASE ) if config_path is not None: _lowerCAmelCase = FlavaImageCodebookConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = FlavaImageCodebookConfig() _lowerCAmelCase = FlavaImageCodebook(_SCREAMING_SNAKE_CASE ).eval() _lowerCAmelCase = encoder.state_dict() _lowerCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = hf_model.state_dict() _lowerCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) else: return hf_state_dict if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCAmelCase_ = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
664
import numpy class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _lowerCAmelCase = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _lowerCAmelCase = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _lowerCAmelCase = numpy.random.rand(3 , 1 ) # Real output values provided. _lowerCAmelCase = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _lowerCAmelCase = numpy.zeros(output_array.shape ) def __lowerCAmelCase ( self ): _lowerCAmelCase = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def __lowerCAmelCase ( self ): _lowerCAmelCase = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _lowerCAmelCase = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _lowerCAmelCase = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): for iteration in range(1 , iterations + 1 ): _lowerCAmelCase = self.feedforward() self.back_propagation() if give_loss: _lowerCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def __lowerCAmelCase ( self , _lowerCAmelCase ): 
_lowerCAmelCase = input_arr _lowerCAmelCase = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray: return 1 / (1 + numpy.exp(-value )) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray: return (value) * (1 - (value)) def UpperCAmelCase__ ( )->int: _lowerCAmelCase = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _lowerCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _lowerCAmelCase = TwoHiddenLayerNeuralNetwork( input_array=_SCREAMING_SNAKE_CASE , output_array=_SCREAMING_SNAKE_CASE ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_SCREAMING_SNAKE_CASE , iterations=1_0 , give_loss=_SCREAMING_SNAKE_CASE ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
664
1
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
664
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
1
def is_even(number: int) -> bool:
    """
    Return True if the given integer is even, using a bitwise AND with 1.

    >>> is_even(2)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
664
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    # Set every passed object to None and clear the relevant device cache.
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
664
1
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = WavaVecaPhonemeCTCTokenizer SCREAMING_SNAKE_CASE__ = False def __lowerCAmelCase ( self ): super().setUp() _lowerCAmelCase = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) _lowerCAmelCase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) _lowerCAmelCase = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + '''\n''' ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=20 , _lowerCAmelCase=5 ): _lowerCAmelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase )) for i in range(len(_lowerCAmelCase ) )] _lowerCAmelCase = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_lowerCAmelCase ) , _lowerCAmelCase ) ) if max_length is not None and len(_lowerCAmelCase ) > max_length: _lowerCAmelCase = toks[:max_length] if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0: while len(_lowerCAmelCase ) < min_length: _lowerCAmelCase = toks + toks # toks_str = [t[1] for t in toks] _lowerCAmelCase = [t[0] for t in toks] # Ensure consistency _lowerCAmelCase = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) if " " not in output_txt and len(_lowerCAmelCase ) > 1: _lowerCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase ) ) if with_prefix_space: 
_lowerCAmelCase = ''' ''' + output_txt _lowerCAmelCase = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) return output_txt, output_ids def __lowerCAmelCase ( self , **_lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) _lowerCAmelCase = tokenizer('''m xxx ɪ''' , do_phonemize=_lowerCAmelCase ).input_ids self.assertEqual(_lowerCAmelCase , [13, 392, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) _lowerCAmelCase = tokenizer('''m aaa ɪ ccc''' , do_phonemize=_lowerCAmelCase ).input_ids self.assertEqual(_lowerCAmelCase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa _lowerCAmelCase = tokenizer('''maɪ c''' , do_phonemize=_lowerCAmelCase ).input_ids self.assertEqual(_lowerCAmelCase , [3, 200] ) # mai should be <unk> (=3) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _lowerCAmelCase = '''Hello how are you''' _lowerCAmelCase = tokenizer.phonemize(_lowerCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(_lowerCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _lowerCAmelCase = '''Hello how are you''' _lowerCAmelCase = tokenizer.phonemize(_lowerCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(_lowerCAmelCase ).input_ids , tokenizer(_lowerCAmelCase , do_phonemize=_lowerCAmelCase ).input_ids ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _lowerCAmelCase = '''Hello how are you''' _lowerCAmelCase = tokenizer.phonemize(_lowerCAmelCase , phonemizer_lang='''en-us''' ) _lowerCAmelCase = tokenizer.decode(tokenizer(_lowerCAmelCase ).input_ids ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _lowerCAmelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] _lowerCAmelCase = tokenizer.decode(sample_ids[0] ) _lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , batch_tokens[0] ) self.assertEqual(_lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) _lowerCAmelCase = '''Hello how are you''' _lowerCAmelCase = tokenizer.phonemize(_lowerCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(_lowerCAmelCase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) _lowerCAmelCase = '''Hello how are you''' _lowerCAmelCase = tokenizer.phonemize(_lowerCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(_lowerCAmelCase ).input_ids , tokenizer(_lowerCAmelCase , do_phonemize=_lowerCAmelCase ).input_ids ) def __lowerCAmelCase ( self ): 
_lowerCAmelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off _lowerCAmelCase = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter _lowerCAmelCase = tokenizer.decode(sample_ids[0] ) _lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , batch_tokens[0] ) self.assertEqual(_lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter _lowerCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , filter_word_delimiter_token=_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , batch_tokens[0] ) self.assertEqual(_lowerCAmelCase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) _lowerCAmelCase = '''Hello how are you''' _lowerCAmelCase = tokenizer.phonemize(_lowerCAmelCase , phonemizer_lang='''en-us''' ) _lowerCAmelCase = tokenizer.decode(tokenizer(_lowerCAmelCase ).input_ids , filter_word_delimiter_token=_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) _lowerCAmelCase = '''Hello how are you''' _lowerCAmelCase = tokenizer.phonemize(_lowerCAmelCase , phonemizer_lang='''en-us''' ) _lowerCAmelCase = tokenizer.decode(tokenizer(_lowerCAmelCase ).input_ids , filter_word_delimiter_token=_lowerCAmelCase ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=_lowerCAmelCase ) _lowerCAmelCase = '''Hello how are you''' _lowerCAmelCase = tokenizer(_lowerCAmelCase , phonemizer_lang='''en-us''' ).input_ids _lowerCAmelCase = tokenizer(_lowerCAmelCase , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(_lowerCAmelCase , '''ɛ l o h aʊ a ʁ j u''' ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _lowerCAmelCase = '''Hello how Are you''' _lowerCAmelCase = '''hello how are you''' _lowerCAmelCase = tokenizer(_lowerCAmelCase ).input_ids _lowerCAmelCase = tokenizer(_lowerCAmelCase ).input_ids self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off _lowerCAmelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 
22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on _lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def __lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = [d[key] for d in offsets] return retrieved_list def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" _lowerCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on _lowerCAmelCase = tokenizer.decode(_lowerCAmelCase , output_char_offsets=_lowerCAmelCase , filter_word_delimiter_token=_lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(_lowerCAmelCase , _lowerCAmelCase ): self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) ) self.assertTrue(isinstance(outputs_list[0] , _lowerCAmelCase ) ) # transform list to ModelOutput _lowerCAmelCase = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(_lowerCAmelCase , _lowerCAmelCase ): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): [recursive_check(_lowerCAmelCase , _lowerCAmelCase ) for la, la in zip(_lowerCAmelCase , _lowerCAmelCase )] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off _lowerCAmelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char _lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , output_char_offsets=_lowerCAmelCase ) _lowerCAmelCase = [tokenizer.decode(_lowerCAmelCase , output_char_offsets=_lowerCAmelCase ) for ids in sample_ids] check_list_tuples_equal(_lowerCAmelCase , _lowerCAmelCase ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def __lowerCAmelCase ( self ): pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def __lowerCAmelCase ( self ): pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def __lowerCAmelCase ( self ): pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def __lowerCAmelCase ( self ): pass def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_tokenizers(do_lower_case=_lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowerCAmelCase = tokenizer.vocab_size _lowerCAmelCase = len(_lowerCAmelCase ) self.assertNotEqual(_lowerCAmelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) _lowerCAmelCase = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] _lowerCAmelCase = tokenizer.add_tokens(_lowerCAmelCase ) _lowerCAmelCase = tokenizer.vocab_size _lowerCAmelCase = len(_lowerCAmelCase ) self.assertNotEqual(_lowerCAmelCase , 0 ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase ) ) self.assertEqual(_lowerCAmelCase , all_size + len(_lowerCAmelCase ) ) _lowerCAmelCase = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_lowerCAmelCase ) self.assertGreaterEqual(len(_lowerCAmelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) _lowerCAmelCase = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} _lowerCAmelCase = tokenizer.add_special_tokens(_lowerCAmelCase ) _lowerCAmelCase = tokenizer.vocab_size _lowerCAmelCase = len(_lowerCAmelCase ) self.assertNotEqual(_lowerCAmelCase , 0 ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase ) ) self.assertEqual(_lowerCAmelCase , all_size_a + len(_lowerCAmelCase ) ) _lowerCAmelCase = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_lowerCAmelCase ) self.assertGreaterEqual(len(_lowerCAmelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def __lowerCAmelCase ( self ): pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def __lowerCAmelCase ( self ): pass def __lowerCAmelCase ( self ): # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case 
for Wav2Vec2PhonemeCTCTokenizer. _lowerCAmelCase = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowerCAmelCase = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] _lowerCAmelCase = tokenizer.convert_tokens_to_string(_lowerCAmelCase ) self.assertIsInstance(output['''text'''] , _lowerCAmelCase )
664
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope def __lowerCAmelCase ( self ): _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_config() _lowerCAmelCase = 300 return config def __lowerCAmelCase ( self ): ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = self.prepare_config_and_inputs() 
_lowerCAmelCase = True _lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowerCAmelCase = True _lowerCAmelCase = MraModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , ) _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , ) _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_choices _lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = () def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def __lowerCAmelCase ( self ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) @slow def __lowerCAmelCase ( self ): for model_name in 
MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = MraModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def __lowerCAmelCase ( self ): return @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) _lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
664
1
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class UpperCAmelCase : # setable values SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None # sigma(t_i) @classmethod def __lowerCAmelCase ( cls ): return cls() @dataclass class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 class UpperCAmelCase ( snake_case_ ,snake_case_ ): @property def __lowerCAmelCase ( self ): return True @register_to_config def __init__( self , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 100 , _lowerCAmelCase = 1.007 , _lowerCAmelCase = 80 , _lowerCAmelCase = 0.05 , _lowerCAmelCase = 50 , ): pass def __lowerCAmelCase ( self ): return KarrasVeSchedulerState.create() def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = () ): _lowerCAmelCase = jnp.arange(0 , _lowerCAmelCase )[::-1].copy() _lowerCAmelCase = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=_lowerCAmelCase , schedule=jnp.array(_lowerCAmelCase , dtype=jnp.floataa ) , timesteps=_lowerCAmelCase , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): if self.config.s_min <= sigma <= self.config.s_max: _lowerCAmelCase = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: _lowerCAmelCase = 0 # sample eps ~ N(0, S_noise^2 * I) _lowerCAmelCase = random.split(_lowerCAmelCase , num=1 ) _lowerCAmelCase = self.config.s_noise * random.normal(key=_lowerCAmelCase , shape=sample.shape ) _lowerCAmelCase = sigma + gamma * sigma _lowerCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True , ): _lowerCAmelCase = sample_hat + sigma_hat * model_output _lowerCAmelCase = (sample_hat - pred_original_sample) / sigma_hat _lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_lowerCAmelCase , derivative=_lowerCAmelCase , state=_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True , ): _lowerCAmelCase = sample_prev + sigma_prev * model_output _lowerCAmelCase = (sample_prev - pred_original_sample) / sigma_prev _lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_lowerCAmelCase , derivative=_lowerCAmelCase , state=_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): raise NotImplementedError()
664
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
664
1
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope def __lowerCAmelCase ( self ): _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_config() _lowerCAmelCase = 300 return config def __lowerCAmelCase ( self ): ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = self.prepare_config_and_inputs() 
_lowerCAmelCase = True _lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowerCAmelCase = True _lowerCAmelCase = MraModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , ) _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , ) _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_choices _lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = () def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def __lowerCAmelCase ( self ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) @slow def __lowerCAmelCase ( self ): for model_name in 
MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = MraModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def __lowerCAmelCase ( self ): return @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) _lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
664
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ): _lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18} _lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_frames _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = crop_size def __lowerCAmelCase ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ): _lowerCAmelCase = VivitImageProcessingTester(self ) @property def __lowerCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
664
1
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->List[str]: if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _lowerCAmelCase = k.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if k.startswith('''encoder''' ): _lowerCAmelCase = k.replace('''.attn''' , '''.self_attn''' ) _lowerCAmelCase = k.replace('''norm1''' , '''self_attn_layer_norm''' ) _lowerCAmelCase = k.replace('''norm2''' , '''final_layer_norm''' ) elif k.startswith('''decoder''' ): _lowerCAmelCase = k.replace('''norm1''' , '''self_attn_layer_norm''' ) _lowerCAmelCase = k.replace('''norm2''' , '''encoder_attn_layer_norm''' ) _lowerCAmelCase = k.replace('''norm3''' , '''final_layer_norm''' ) return k def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->Optional[int]: _lowerCAmelCase = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: _lowerCAmelCase = sd.pop(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = k.replace('''layernorm_embedding''' , '''layer_norm''' ) assert new_k not in sd _lowerCAmelCase = v UpperCAmelCase_ = ["START"] @torch.no_grad() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] )->Optional[int]: _lowerCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _lowerCAmelCase = model['''model'''] _lowerCAmelCase = BlenderbotConfig.from_json_file(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = BlenderbotForConditionalGeneration(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = m.model.state_dict().keys() _lowerCAmelCase = [] _lowerCAmelCase = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _lowerCAmelCase = rename_state_dict_key(_SCREAMING_SNAKE_CASE ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _lowerCAmelCase = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(_SCREAMING_SNAKE_CASE ) m.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) m.half() m.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) UpperCAmelCase_ = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
664
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n" UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]: def remove_articles(_SCREAMING_SNAKE_CASE : List[str] ): _lowerCAmelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(_SCREAMING_SNAKE_CASE , ''' ''' , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ): _lowerCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE : Optional[int] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Any: return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )->int: _lowerCAmelCase = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 1_0_0 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , 
_SCREAMING_SNAKE_CASE : List[str] )->Optional[int]: _lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams] _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter() for sgram, scount in sgramcounter.items(): _lowerCAmelCase = scount * numref _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter() for cgram, ccount in cgramcounter.items(): _lowerCAmelCase = ccount * numref # KEEP _lowerCAmelCase = sgramcounter_rep & cgramcounter_rep _lowerCAmelCase = keepgramcounter_rep & rgramcounter _lowerCAmelCase = sgramcounter_rep & rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = keeptmpscorea / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _lowerCAmelCase = 0 if keepscore_precision > 0 or keepscore_recall > 0: _lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _lowerCAmelCase = sgramcounter_rep - cgramcounter_rep _lowerCAmelCase = delgramcounter_rep - rgramcounter _lowerCAmelCase = sgramcounter_rep - rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = deltmpscorea / len(_SCREAMING_SNAKE_CASE ) # ADDITION _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 if addscore_precision > 0 or addscore_recall > 0: _lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str )->List[Any]: _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = ssent.split(''' ''' ) _lowerCAmelCase = csent.split(''' ''' ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] for rsent in rsents: _lowerCAmelCase = rsent.split(''' ''' ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(_SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = sum([keepascore, keepascore, 
keepascore, keepascore] ) / 4 _lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4 _lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4 _lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : str = "13a" , _SCREAMING_SNAKE_CASE : bool = True )->int: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: _lowerCAmelCase = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE ) elif tokenizer == "moses": _lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE ) elif tokenizer == "penn": _lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = sentence if not return_str: _lowerCAmelCase = normalized_sent.split() return normalized_sent def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->str: if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _lowerCAmelCase = 0 for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] ) _lowerCAmelCase = sari_score / len(_SCREAMING_SNAKE_CASE ) return 1_0_0 * sari_score def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]="exp" , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=False , )->str: _lowerCAmelCase = len(references[0] ) if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _lowerCAmelCase = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )] _lowerCAmelCase = sacrebleu.corpus_bleu( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __lowerCAmelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 
'''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = {} result.update({'''sari''': compute_sari(sources=_lowerCAmelCase , predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) result.update({'''exact''': compute_em(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) return result
664
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase : @staticmethod def __lowerCAmelCase ( *_lowerCAmelCase , **_lowerCAmelCase ): pass @is_pipeline_test @require_vision class UpperCAmelCase ( unittest.TestCase ): @require_torch def __lowerCAmelCase ( self ): _lowerCAmelCase = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _lowerCAmelCase = image_classifier(_lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(_lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], ] , ) @require_tf def __lowerCAmelCase ( self ): _lowerCAmelCase = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _lowerCAmelCase = image_classifier(_lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, 
'''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(_lowerCAmelCase )}, ], ] , ) @slow @require_torch def __lowerCAmelCase ( self ): _lowerCAmelCase = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _lowerCAmelCase = image_classifier(_lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __lowerCAmelCase ( self ): _lowerCAmelCase = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _lowerCAmelCase = image_classifier(_lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
664
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["DeiTFeatureExtractor"] UpperCAmelCase_ = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = '''ClapFeatureExtractor''' SCREAMING_SNAKE_CASE__ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: _lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if audios is not None: _lowerCAmelCase = self.feature_extractor( _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None and audios is not None: _lowerCAmelCase = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase ) def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def __lowerCAmelCase ( self ): _lowerCAmelCase = self.tokenizer.model_input_names _lowerCAmelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
664
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] )->Any: # noqa: E741 _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 _lowerCAmelCase = [0] * n _lowerCAmelCase = [False] * n _lowerCAmelCase = [False] * n def dfs(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ): if parent == root: out_edge_count += 1 _lowerCAmelCase = True _lowerCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: _lowerCAmelCase = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: _lowerCAmelCase = True # AP found via cycle if at == low[to]: _lowerCAmelCase = True else: _lowerCAmelCase = min(low[at] , _SCREAMING_SNAKE_CASE ) return out_edge_count for i in range(_SCREAMING_SNAKE_CASE ): if not visited[i]: _lowerCAmelCase = 0 _lowerCAmelCase = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , -1 , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = out_edge_count > 1 for x in range(len(_SCREAMING_SNAKE_CASE ) ): if is_art[x] is True: print(_SCREAMING_SNAKE_CASE ) # Adjacency list of graph UpperCAmelCase_ = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
664
1
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets UpperCAmelCase_ = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n" UpperCAmelCase_ = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n" UpperCAmelCase_ = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] )->Dict: return float((preds == labels).mean() ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple )->Dict: _lowerCAmelCase = simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = float(fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=_SCREAMING_SNAKE_CASE ) ) return { "accuracy": acc, "f1": fa, } def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str )->Optional[int]: _lowerCAmelCase = np.array(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = np.array(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = en_sentvecs.shape[0] # mean centering _lowerCAmelCase = en_sentvecs - np.mean(_SCREAMING_SNAKE_CASE , axis=0 ) _lowerCAmelCase = in_sentvecs - np.mean(_SCREAMING_SNAKE_CASE , axis=0 ) _lowerCAmelCase = cdist(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''cosine''' ) _lowerCAmelCase = np.array(range(_SCREAMING_SNAKE_CASE ) ) _lowerCAmelCase = sim.argsort(axis=1 )[:, :1_0] _lowerCAmelCase = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( 
datasets.Metric ): def __lowerCAmelCase ( self ): if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), '''references''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(_lowerCAmelCase , _lowerCAmelCase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(_lowerCAmelCase , _lowerCAmelCase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' )
664
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCAmelCase ( snake_case_ ): def __lowerCAmelCase ( self ): _lowerCAmelCase = SMALL_MODEL_IDENTIFIER _lowerCAmelCase = '''pt''' _lowerCAmelCase = '''tf''' def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase ) model_tf.save_pretrained(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''mock_framework''' # Framework provided - return whatever the user provides _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch( '''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # Both not in environment -> raise error _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) _lowerCAmelCase = 
MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch( '''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
664
1
import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging UpperCAmelCase_ = logging.get_logger(__name__) class UpperCAmelCase : SCREAMING_SNAKE_CASE__ = None @experimental def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] )->str: if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return _map_with_joblib(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->Union[str, Any]: _lowerCAmelCase = num_proc if num_proc <= len(_SCREAMING_SNAKE_CASE ) else len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = [] # We organize the splits ourselve (contiguous splits) for index in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) // num_proc _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) % num_proc _lowerCAmelCase = div * index + min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(_SCREAMING_SNAKE_CASE ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( f'''Error dividing inputs iterable among processes. 
''' f'''Total number of objects {len(_SCREAMING_SNAKE_CASE )}, ''' f'''length: {sum(len(i[1] ) for i in split_kwds )}''' ) logger.info( f'''Spawning {num_proc} processes for {len(_SCREAMING_SNAKE_CASE )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' ) _lowerCAmelCase , _lowerCAmelCase = None, None if not disable_tqdm: _lowerCAmelCase , _lowerCAmelCase = (RLock(),), tqdm.set_lock with Pool(_SCREAMING_SNAKE_CASE , initargs=_SCREAMING_SNAKE_CASE , initializer=_SCREAMING_SNAKE_CASE ) as pool: _lowerCAmelCase = pool.map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info(f'''Finished {num_proc} processes''' ) _lowerCAmelCase = [obj for proc_res in mapped for obj in proc_res] logger.info(f'''Unpacked {len(_SCREAMING_SNAKE_CASE )} objects''' ) return mapped def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]: # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib, # and it requires monkey-patching joblib internal classes which is subject to change import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=_SCREAMING_SNAKE_CASE ): return joblib.Parallel()( joblib.delayed(_SCREAMING_SNAKE_CASE )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->List[Any]: _lowerCAmelCase = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: _lowerCAmelCase = None
664
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = DiTPipeline SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = False def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith('''mps''' ): _lowerCAmelCase = torch.manual_seed(_lowerCAmelCase ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) _lowerCAmelCase = { '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ): _lowerCAmelCase = '''cpu''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase ) _lowerCAmelCase = pipe(**_lowerCAmelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase , 1E-3 ) def __lowerCAmelCase ( self ): self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = 
pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCAmelCase ( self ): _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' F'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-1
664
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { "configuration_mobilebert": [ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertOnnxConfig", ], "tokenization_mobilebert": ["MobileBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["MobileBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase_ = {"UserAgent": UserAgent().random} def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->dict: _lowerCAmelCase = script.contents[0] _lowerCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCAmelCase : def __init__( self , _lowerCAmelCase ): _lowerCAmelCase = F'''https://www.instagram.com/{username}/''' _lowerCAmelCase = self.get_json() def __lowerCAmelCase ( self ): _lowerCAmelCase = requests.get(self.url , headers=_lowerCAmelCase ).text _lowerCAmelCase = BeautifulSoup(_lowerCAmelCase , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self ): return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self ): return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __lowerCAmelCase ( self ): return self.user_data["username"] @property def __lowerCAmelCase ( self ): return self.user_data["full_name"] @property def __lowerCAmelCase ( self ): return self.user_data["biography"] @property def __lowerCAmelCase ( self ): return self.user_data["business_email"] @property def __lowerCAmelCase ( self ): return self.user_data["external_url"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_followed_by"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_follow"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["profile_pic_url_hd"] @property def __lowerCAmelCase ( self ): return self.user_data["is_verified"] @property def __lowerCAmelCase ( self ): return self.user_data["is_private"] def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions _lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = InstagramUser("github") print(instagram_user) print(F"""{instagram_user.number_of_posts = }""") print(F"""{instagram_user.number_of_followers = }""") print(F"""{instagram_user.number_of_followings = }""") print(F"""{instagram_user.email = }""") print(F"""{instagram_user.website = }""") print(F"""{instagram_user.profile_picture_url = }""") print(F"""{instagram_user.is_verified = }""") print(F"""{instagram_user.is_private = }""")
664
1
from collections import namedtuple import requests from lxml import html # type: ignore UpperCAmelCase_ = namedtuple("covid_data", "cases deaths recovered") def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict = "https://www.worldometers.info/coronavirus/" )->covid_data: _lowerCAmelCase = '''//div[@class = \"maincounter-number\"]/span/text()''' return covid_data(*html.fromstring(requests.get(SCREAMING_SNAKE_CASE_ ).content ).xpath(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase_ = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
700
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : str )->list[int]: _lowerCAmelCase = int(_SCREAMING_SNAKE_CASE ) # Initialize Result _lowerCAmelCase = [] # Traverse through all denomination for denomination in reversed(_SCREAMING_SNAKE_CASE ): # Find denominations while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ): total_value -= int(_SCREAMING_SNAKE_CASE ) answer.append(_SCREAMING_SNAKE_CASE ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase_ = [] UpperCAmelCase_ = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): UpperCAmelCase_ = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) UpperCAmelCase_ = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] UpperCAmelCase_ = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(F"""Following is minimal change for {value}: """) UpperCAmelCase_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
664
0
import argparse import json from tqdm import tqdm def UpperCAmelCase__ ( )->Union[str, Any]: _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--src_path''' , type=__A , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , ) parser.add_argument( '''--evaluation_set''' , type=__A , help='''where to store parsed evaluation_set file''' , ) parser.add_argument( '''--gold_data_path''' , type=__A , help='''where to store parsed gold_data_path file''' , ) _lowerCAmelCase = parser.parse_args() with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open( args.gold_data_path , '''w''' ) as gold_file: _lowerCAmelCase = json.load(__A ) for dpr_record in tqdm(__A ): _lowerCAmelCase = dpr_record['''question'''] _lowerCAmelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']] eval_file.write(question + '''\n''' ) gold_file.write('''\t'''.join(__A ) + '''\n''' ) if __name__ == "__main__": main()
701
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict: # Initialise PyTorch model _lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) _lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
664
0
from heapq import heappop, heappush

import numpy as np


def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , )->tuple[float | int, list[tuple[int, int]]]:
    _lowerCAmelCase , _lowerCAmelCase = grid.shape
    _lowerCAmelCase = [-1, 1, 0, 0]
    _lowerCAmelCase = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    _lowerCAmelCase , _lowerCAmelCase = [(0, source)], set()
    _lowerCAmelCase = np.full((rows, cols) , np.inf )
    _lowerCAmelCase = 0
    _lowerCAmelCase = np.empty((rows, cols) , dtype=_SCREAMING_SNAKE_CASE )
    _lowerCAmelCase = None

    while queue:
        ((_lowerCAmelCase) , (_lowerCAmelCase)) = heappop(_SCREAMING_SNAKE_CASE )
        if (x, y) in visited:
            continue
        visited.add((x, y) )

        if (x, y) == destination:
            _lowerCAmelCase = []
            while (x, y) != source:
                path.append((x, y) )
                _lowerCAmelCase , _lowerCAmelCase = predecessors[x, y]
            path.append(_SCREAMING_SNAKE_CASE )  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(_SCREAMING_SNAKE_CASE ) ):
            _lowerCAmelCase , _lowerCAmelCase = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                _lowerCAmelCase = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(_SCREAMING_SNAKE_CASE , (dist + 1, (nx, ny)) )
                    _lowerCAmelCase = dist + 1
                    _lowerCAmelCase = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = "Hello world! cécé herlolip" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]: _lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout _lowerCAmelCase = roberta.model.encoder.sentence_encoder _lowerCAmelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings _lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight _lowerCAmelCase = roberta_sent_encoder.embed_positions.weight _lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight _lowerCAmelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _lowerCAmelCase = model.roberta.encoder.layer[i] _lowerCAmelCase = roberta_sent_encoder.layers[i] _lowerCAmelCase = layer.attention _lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight _lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias # self attention _lowerCAmelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _lowerCAmelCase = roberta_layer.self_attn.q_proj.weight _lowerCAmelCase = roberta_layer.self_attn.q_proj.bias _lowerCAmelCase = roberta_layer.self_attn.k_proj.weight _lowerCAmelCase = roberta_layer.self_attn.k_proj.bias _lowerCAmelCase = roberta_layer.self_attn.v_proj.weight _lowerCAmelCase = roberta_layer.self_attn.v_proj.bias # self-attention output _lowerCAmelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _lowerCAmelCase = roberta_layer.self_attn.out_proj.weight _lowerCAmelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _lowerCAmelCase = roberta_layer.final_layer_norm.weight _lowerCAmelCase = roberta_layer.final_layer_norm.bias # intermediate _lowerCAmelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # output _lowerCAmelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # end of layer if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head _lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight _lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias _lowerCAmelCase = roberta.model.encoder.lm_head.weight _lowerCAmelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 _lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0] if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) ) else: _lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) _lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 _lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) UpperCAmelCase_ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
664
0
'''simple docstring'''
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] )->float:
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''' )
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''' )
    if years_to_repay <= 0 or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        raise Exception('''Years to repay must be an integer > 0''' )

    # Yearly rate is divided by 12 to get monthly rate
    _lowerCAmelCase = rate_per_annum / 1_2

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    _lowerCAmelCase = years_to_repay * 1_2

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
703
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _lowerCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = i / num_diffusion_timesteps _lowerCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class UpperCAmelCase ( snake_case_ ,snake_case_ ): SCREAMING_SNAKE_CASE__ = 1 @register_to_config def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ): if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None: _lowerCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase ) _lowerCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowerCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _lowerCAmelCase = 1.0 - self.betas _lowerCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _lowerCAmelCase = 1.0 # setable values _lowerCAmelCase = None _lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): return sample def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' F''' maximal {self.config.num_train_timesteps} timesteps.''' ) _lowerCAmelCase = num_inference_steps _lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa ) _lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase ) self.timesteps += self.config.steps_offset def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ): # 1. get previous step value (=t+1) _lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _lowerCAmelCase = self.alphas_cumprod[timestep] _lowerCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _lowerCAmelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _lowerCAmelCase = model_output elif self.config.prediction_type == "sample": _lowerCAmelCase = model_output _lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _lowerCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def __len__( self ): return self.config.num_train_timesteps
664
0
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
UpperCAmelCase_ = 1.054571817E-34  # unit of ℏ : J * s
UpperCAmelCase_ = 3E8  # unit of c : m * s^-1


def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] )->dict[str, float]:
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if force < 0:
        raise ValueError('''Magnitude of force can not be negative''' )
    if distance < 0:
        raise ValueError('''Distance can not be negative''' )
    if area < 0:
        raise ValueError('''Area can not be negative''' )
    if force == 0:
        _lowerCAmelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_4_0 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        _lowerCAmelCase = (2_4_0 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        _lowerCAmelCase = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('''One and only one argument must be 0''' )


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
704
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


UpperCAmelCase_ = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
0
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class UpperCAmelCase ( lowercase__ ,lowercase__ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = IFInpaintingSuperResolutionPipeline SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ): return self._get_superresolution_dummy_components() def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ): if str(__lowerCamelCase ).startswith('''mps''' ): _lowerCAmelCase = torch.manual_seed(__lowerCamelCase ) else: _lowerCAmelCase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) _lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) _lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) _lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) _lowerCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ): super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self ): self._test_save_load_local() def __lowerCAmelCase ( self ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
705
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class UpperCAmelCase ( snake_case_ ):
    SCREAMING_SNAKE_CASE__ = '''ClapFeatureExtractor'''
    SCREAMING_SNAKE_CASE__ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
        super().__init__(_lowerCAmelCase , _lowerCAmelCase )

    def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
        _lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase )

        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )

        if text is not None:
            _lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )

        if audios is not None:
            _lowerCAmelCase = self.feature_extractor(
                _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )

        if text is not None and audios is not None:
            _lowerCAmelCase = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )

    def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
        return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )

    def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
        return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )

    @property
    def __lowerCAmelCase ( self ):
        _lowerCAmelCase = self.tokenizer.model_input_names
        _lowerCAmelCase = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
664
0
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class UpperCAmelCase ( __lowerCamelCase ):
    SCREAMING_SNAKE_CASE__ = ['''image_processor''', '''tokenizer''']
    SCREAMING_SNAKE_CASE__ = '''AutoImageProcessor'''
    SCREAMING_SNAKE_CASE__ = '''AutoTokenizer'''

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
        super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
        _lowerCAmelCase = self.image_processor

    def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            _lowerCAmelCase = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )

        if images is not None:
            _lowerCAmelCase = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )

        if text is not None and images is not None:
            _lowerCAmelCase = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )

    def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
        return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )

    def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
        return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )

    @property
    def __lowerCAmelCase ( self ):
        return ["input_ids", "attention_mask", "pixel_values"]
706
from __future__ import annotations


def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list )->list:
    if len(_SCREAMING_SNAKE_CASE ) == 0:
        return []
    _lowerCAmelCase , _lowerCAmelCase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
    _lowerCAmelCase = int(max_value - min_value ) + 1
    _lowerCAmelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )]

    for i in my_list:
        buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE )

    return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
664
0
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCAmelCase_ = pytest.mark.integration UpperCAmelCase_ = {"comet"} UpperCAmelCase_ = importlib.util.find_spec("fairseq") is not None UpperCAmelCase_ = {"code_eval"} UpperCAmelCase_ = os.name == "nt" UpperCAmelCase_ = {"bertscore", "frugalscore", "perplexity"} UpperCAmelCase_ = importlib.util.find_spec("transformers") is not None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple )->Tuple: @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[Any] , _SCREAMING_SNAKE_CASE : Any ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->Dict: @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Dict , _SCREAMING_SNAKE_CASE : List[Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any )->Tuple: @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : str ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def UpperCAmelCase__ ( )->List[str]: _lowerCAmelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) @local class UpperCAmelCase ( parameterized.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = '[...]' _lowerCAmelCase = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , _lowercase ) ).module_path ) _lowerCAmelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=_lowercase ) # check parameters _lowerCAmelCase = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(_lowercase , metric_module.__name__ ): with self.use_local_metrics(): try: _lowerCAmelCase = doctest.testmod(_lowercase , verbose=_lowercase , raise_on_error=_lowercase ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = '[...]' _lowerCAmelCase = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , _lowercase ) ).module_path ) # run doctest with self.use_local_metrics(): _lowerCAmelCase = 
doctest.testmod(_lowercase , verbose=_lowercase , raise_on_error=_lowercase ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](_lowercase ): yield else: yield @contextmanager def __lowerCAmelCase ( self ): def load_local_metric(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ): return load_metric(os.path.join('''metrics''' , _lowercase ) , *_lowercase , **_lowercase ) with patch('''datasets.load_metric''' ) as mock_load_metric: _lowerCAmelCase = load_local_metric yield @classmethod def __lowerCAmelCase ( cls , _lowerCAmelCase ): def wrapper(_lowerCAmelCase ): _lowerCAmelCase = contextmanager(_lowercase ) _lowerCAmelCase = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->Any: import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags class UpperCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' def __lowerCAmelCase ( self , _lowerCAmelCase ): assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: _lowerCAmelCase = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] )->Tuple: import torch def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , *_SCREAMING_SNAKE_CASE : Tuple , **_SCREAMING_SNAKE_CASE : int ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: _lowerCAmelCase = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->Any: def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Any ): class UpperCAmelCase : '''simple docstring''' def __lowerCAmelCase ( self , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ): assert len(_lowercase ) == 2 _lowerCAmelCase = [0.19, 0.92] return scores, sum(_lowercase ) / len(_lowercase ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: _lowerCAmelCase = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: _lowerCAmelCase = load_from_checkpoint yield def UpperCAmelCase__ ( )->Optional[Any]: _lowerCAmelCase = load_metric(os.path.join('''metrics''' , '''seqeval''' ) ) _lowerCAmelCase = 'ERROR' _lowerCAmelCase = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
707
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def UpperCAmelCase__ ( )->Any: _lowerCAmelCase = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: _lowerCAmelCase = get_sagemaker_input() else: _lowerCAmelCase = get_cluster_input() return config def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str: if subparsers is not None: _lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE ) parser.add_argument( '''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str: _lowerCAmelCase = get_user_input() if args.config_file is not None: _lowerCAmelCase = args.config_file else: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(_SCREAMING_SNAKE_CASE ) else: config.to_yaml_file(_SCREAMING_SNAKE_CASE ) print(f'''accelerate configuration saved at {config_file}''' ) def UpperCAmelCase__ ( )->List[Any]: _lowerCAmelCase = config_command_parser() _lowerCAmelCase = parser.parse_args() config_command(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
664
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class UpperCAmelCase ( __A ): SCREAMING_SNAKE_CASE__ = '''table-transformer''' SCREAMING_SNAKE_CASE__ = ['''past_key_values'''] SCREAMING_SNAKE_CASE__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=3 , _lowerCAmelCase=100 , _lowerCAmelCase=6 , _lowerCAmelCase=2_048 , _lowerCAmelCase=8 , _lowerCAmelCase=6 , _lowerCAmelCase=2_048 , _lowerCAmelCase=8 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=256 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1.0 , _lowerCAmelCase=False , _lowerCAmelCase="sine" , _lowerCAmelCase="resnet50" , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=1 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=1 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=0.1 , **_lowerCAmelCase , ): if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) _lowerCAmelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = backbone_config.get('''model_type''' ) _lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] _lowerCAmelCase = config_class.from_dict(_lowerCAmelCase ) # set timm attributes to None _lowerCAmelCase = None, None, None _lowerCAmelCase = use_timm_backbone _lowerCAmelCase = backbone_config _lowerCAmelCase = num_channels _lowerCAmelCase = num_queries _lowerCAmelCase = d_model _lowerCAmelCase = encoder_ffn_dim _lowerCAmelCase = encoder_layers _lowerCAmelCase = encoder_attention_heads _lowerCAmelCase = decoder_ffn_dim _lowerCAmelCase = decoder_layers _lowerCAmelCase = decoder_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = activation_function _lowerCAmelCase = init_std _lowerCAmelCase = init_xavier_std _lowerCAmelCase = encoder_layerdrop _lowerCAmelCase = decoder_layerdrop _lowerCAmelCase = encoder_layers _lowerCAmelCase = auxiliary_loss _lowerCAmelCase = position_embedding_type _lowerCAmelCase = backbone _lowerCAmelCase = use_pretrained_backbone _lowerCAmelCase = dilation # Hungarian matcher _lowerCAmelCase = class_cost _lowerCAmelCase = bbox_cost _lowerCAmelCase = giou_cost # Loss coefficients _lowerCAmelCase = mask_loss_coefficient _lowerCAmelCase = dice_loss_coefficient _lowerCAmelCase = bbox_loss_coefficient _lowerCAmelCase = giou_loss_coefficient _lowerCAmelCase = eos_coefficient super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase ) @property def __lowerCAmelCase ( self ): return self.encoder_attention_heads @property def __lowerCAmelCase ( self ): return self.d_model class UpperCAmelCase ( __A ): 
SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' ) @property def __lowerCAmelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def __lowerCAmelCase ( self ): return 1E-5 @property def __lowerCAmelCase ( self ): return 12
708
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex UpperCAmelCase_ = 1_0 UpperCAmelCase_ = 2_5_6 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]: if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS: return None _lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE ) for token in set(_SCREAMING_SNAKE_CASE ): min_hash.update(token.encode() ) return min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]: return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0} class UpperCAmelCase : def __init__( self , *, _lowerCAmelCase = 0.85 , ): _lowerCAmelCase = duplication_jaccard_threshold _lowerCAmelCase = NUM_PERM _lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _lowerCAmelCase = defaultdict(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self._index.query(_lowerCAmelCase ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(_lowerCAmelCase , _lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [] for base, duplicates in self._duplicate_clusters.items(): _lowerCAmelCase = [base] + list(_lowerCAmelCase ) # reformat the cluster to be a list of dict _lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(_lowerCAmelCase ) return duplicate_clusters def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = self.get_duplicate_clusters() with open(_lowerCAmelCase , '''w''' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = element _lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str: _lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ): di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float: _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) UpperCAmelCase_ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]: _lowerCAmelCase = [] for elementa in cluster: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold: elementa["copies"] += 1 break else: _lowerCAmelCase = 1 extremes.append(_SCREAMING_SNAKE_CASE ) return extremes def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple: global _shared_dataset _lowerCAmelCase = dataset _lowerCAmelCase = [] _lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ): extremes_list.append(_SCREAMING_SNAKE_CASE ) return extremes_list def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]: _lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} _lowerCAmelCase = {} _lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for extremes in extremes_clusters: for element in extremes: _lowerCAmelCase = element _lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() ) _lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _lowerCAmelCase = element['''base_index'''] in extreme_dict if element["is_extreme"]: _lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies'''] print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) return ds_filter, duplicate_clusters
664
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase_ = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMAEForPreTraining', 'ViTMAELayer', 'ViTMAEModel', 'ViTMAEPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'TFViTMAEForPreTraining', 'TFViTMAEModel', 'TFViTMAEPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
709
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = process _lowerCAmelCase = params def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): _lowerCAmelCase = self.dataset[i] _lowerCAmelCase = self.process(_lowerCAmelCase , **self.params ) return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): _lowerCAmelCase = loader _lowerCAmelCase = infer _lowerCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _lowerCAmelCase = None _lowerCAmelCase = loader_batch_size # Internal bookkeeping _lowerCAmelCase = None _lowerCAmelCase = None def __len__( self ): return len(self.loader ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _lowerCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _lowerCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Convert ModelOutput to tuple first _lowerCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _lowerCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_lowerCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase ) self._loader_batch_index += 1 return result def __lowerCAmelCase ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _lowerCAmelCase = next(self.iterator ) _lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _lowerCAmelCase = processed _lowerCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) _lowerCAmelCase = None return self def __lowerCAmelCase ( self ): if self.subiterator is None: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _lowerCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) _lowerCAmelCase = next(self.subiterator ) return processed class UpperCAmelCase ( snake_case_ ): def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
_lowerCAmelCase = False _lowerCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator while not is_last: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size _lowerCAmelCase = processed _lowerCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator else: _lowerCAmelCase = processed _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) return accumulator class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = key def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return self.dataset[i][self.key] class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = keya _lowerCAmelCase = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
664
0
from string import ascii_lowercase, ascii_uppercase


def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] )->str:
    if not sentence:
        return ""
    _lowerCAmelCase = dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
710
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
664
0
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # A root can only be bracketed in [a, b] if the function changes sign.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
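Editor's note (illustrative sketch, not part of the snippet above): since the loop halves the bracket [a, b] until its width drops below the 0.01 tolerance, the iteration count is bounded by log2((b - a) / 0.01). The hypothetical helper below makes that bound concrete for the two calls shown.

import math


def max_bisection_steps(a: float, b: float, tol: float = 0.01) -> int:
    # Number of halvings needed before the bracket width (b - a) falls below tol.
    return math.ceil(math.log2((b - a) / tol))


print(max_bisection_steps(-2, 5))  # 10 halvings suffice for the bracket [-2, 5]
print(max_bisection_steps(0, 6))   # likewise 10 for [0, 6]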
711
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["LayoutXLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=True , _lowerCAmelCase=1 / 255 , _lowerCAmelCase=True , ): _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_pad def __lowerCAmelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=False ): if not batched: _lowerCAmelCase = image_inputs[0] if isinstance(_a , Image.Image ): _lowerCAmelCase = image.size else: _lowerCAmelCase = image.shape[1], image.shape[2] if w < h: _lowerCAmelCase = int(self.size['''shortest_edge'''] * h / w ) _lowerCAmelCase = self.size["""shortest_edge"""] elif w > h: _lowerCAmelCase = self.size["""shortest_edge"""] _lowerCAmelCase = int(self.size['''shortest_edge'''] * w / h ) else: _lowerCAmelCase = self.size["""shortest_edge"""] _lowerCAmelCase = self.size["""shortest_edge"""] else: _lowerCAmelCase = [] for image in image_inputs: _lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _lowerCAmelCase = max(_a , key=lambda _lowerCAmelCase : item[0] )[0] _lowerCAmelCase = max(_a , key=lambda _lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase ( UpperCamelCase_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ConditionalDetrImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ): _lowerCAmelCase = ConditionalDetrImageProcessingTester(self ) @property def __lowerCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , '''image_mean''' ) ) self.assertTrue(hasattr(_a , '''image_std''' ) ) self.assertTrue(hasattr(_a , '''do_normalize''' ) ) self.assertTrue(hasattr(_a , '''do_resize''' ) ) self.assertTrue(hasattr(_a , '''size''' ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _a ) _lowerCAmelCase = 
self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_a ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , _a ) def __lowerCAmelCase ( self ): pass def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _lowerCAmelCase = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase = self.image_processor_tester.get_expected_values(_a , batched=_a ) _lowerCAmelCase = image_processing(_a , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _lowerCAmelCase = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase = image_processing(_a , return_tensors='''pt''' ).pixel_values _lowerCAmelCase = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _lowerCAmelCase = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase = image_processing(_a , return_tensors='''pt''' ).pixel_values _lowerCAmelCase = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _lowerCAmelCase = json.loads(f.read() ) _lowerCAmelCase = {"""image_id""": 39_769, """annotations""": target} # encode them _lowerCAmelCase = 
ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) _lowerCAmelCase = image_processing(images=_a , annotations=_a , return_tensors='''pt''' ) # verify pixel values _lowerCAmelCase = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _a ) _lowerCAmelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _a , atol=1E-4 ) ) # verify area _lowerCAmelCase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _a ) ) # verify boxes _lowerCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _a ) _lowerCAmelCase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _a , atol=1E-3 ) ) # verify image_id _lowerCAmelCase = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _a ) ) # verify is_crowd _lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _a ) ) # verify class_labels _lowerCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _a ) ) # verify orig_size _lowerCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _a ) ) # verify size _lowerCAmelCase = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _a ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _lowerCAmelCase = json.loads(f.read() ) _lowerCAmelCase = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target} _lowerCAmelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _lowerCAmelCase = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) _lowerCAmelCase = image_processing(images=_a , annotations=_a , masks_path=_a , return_tensors='''pt''' ) # verify pixel values _lowerCAmelCase = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _a ) _lowerCAmelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _a , atol=1E-4 ) ) # verify area _lowerCAmelCase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _a ) ) # verify boxes _lowerCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _a ) _lowerCAmelCase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _a , atol=1E-3 ) ) # verify image_id _lowerCAmelCase = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _a ) ) # verify is_crowd _lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _a ) ) # verify class_labels _lowerCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _a ) ) # verify masks _lowerCAmelCase = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _a ) # verify orig_size _lowerCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _a ) ) # verify size _lowerCAmelCase = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _a ) )
712
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
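Editor's note (hedged usage sketch): assuming this module is the one exposed as accelerate.utils, the decorator above is applied to a function whose first parameter is the batch size; on an out-of-memory style error it clears caches and retries with half the batch size. The function and print statement below are hypothetical, purely for illustration.

from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def training_function(batch_size):
    # The decorator injects batch_size; callers must not pass it themselves.
    print(f"Attempting a run with batch_size={batch_size}")
    # ... build dataloaders and run the training loop with this batch size ...


training_function()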
664
0
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
713
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope def __lowerCAmelCase ( self ): _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_config() _lowerCAmelCase = 300 return config def __lowerCAmelCase ( self ): ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = self.prepare_config_and_inputs() 
_lowerCAmelCase = True _lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowerCAmelCase = True _lowerCAmelCase = MraModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , ) _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , ) _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_choices _lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = () def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def __lowerCAmelCase ( self ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) @slow def __lowerCAmelCase ( self ): for model_name in 
MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = MraModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def __lowerCAmelCase ( self ): return @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) _lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
664
0
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json", } class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = '''xlnet''' SCREAMING_SNAKE_CASE__ = ['''mems'''] SCREAMING_SNAKE_CASE__ = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , _lowerCAmelCase=32_000 , _lowerCAmelCase=1_024 , _lowerCAmelCase=24 , _lowerCAmelCase=16 , _lowerCAmelCase=4_096 , _lowerCAmelCase="gelu" , _lowerCAmelCase=True , _lowerCAmelCase="bi" , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=-1 , _lowerCAmelCase=False , _lowerCAmelCase="last" , _lowerCAmelCase=True , _lowerCAmelCase="tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=5 , _lowerCAmelCase=5 , _lowerCAmelCase=5 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , **_lowerCAmelCase , ): _lowerCAmelCase = vocab_size _lowerCAmelCase = d_model _lowerCAmelCase = n_layer _lowerCAmelCase = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _lowerCAmelCase = d_model // n_head _lowerCAmelCase = ff_activation _lowerCAmelCase = d_inner _lowerCAmelCase = untie_r _lowerCAmelCase = attn_type _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = dropout _lowerCAmelCase = mem_len _lowerCAmelCase = reuse_len _lowerCAmelCase = bi_data _lowerCAmelCase = clamp_len _lowerCAmelCase = same_length _lowerCAmelCase = summary_type _lowerCAmelCase = summary_use_proj _lowerCAmelCase = summary_activation _lowerCAmelCase = summary_last_dropout _lowerCAmelCase = start_n_top _lowerCAmelCase = end_n_top _lowerCAmelCase = bos_token_id _lowerCAmelCase = pad_token_id _lowerCAmelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , _UpperCAmelCase , ) _lowerCAmelCase = kwargs['''use_cache'''] _lowerCAmelCase = use_mems_eval _lowerCAmelCase = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def __lowerCAmelCase ( self ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def __lowerCAmelCase ( self , _lowerCAmelCase ): raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
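Editor's note (illustrative, hedged): assuming this configuration class is the one published as transformers.XLNetConfig, a minimal instantiation looks like the sketch below; d_head is derived as d_model // n_head, which is why the constructor validates that d_model is divisible by n_head.

from transformers import XLNetConfig

config = XLNetConfig(d_model=1024, n_head=16)
print(config.d_head)  # 64, i.e. d_model // n_head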
714
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
664
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: UpperCAmelCase_ = None UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"} UpperCAmelCase_ = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, "tokenizer_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json", }, } UpperCAmelCase_ = { "google/rembert": 2_5_6, } UpperCAmelCase_ = "▁" class UpperCAmelCase ( __lowerCAmelCase ): SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = RemBertTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , **_lowerCAmelCase , ): _lowerCAmelCase = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , ) _lowerCAmelCase = do_lower_case _lowerCAmelCase = remove_space _lowerCAmelCase = keep_accents _lowerCAmelCase = vocab_file _lowerCAmelCase = False if not self.vocab_file else True def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1] def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if not os.path.isdir(_UpperCamelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_UpperCamelCase ) ) return _lowerCAmelCase = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else 
'''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ): copyfile(self.vocab_file , _UpperCamelCase ) return (out_vocab_file,)
715
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class UpperCAmelCase ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ): _lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18} _lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_frames _lowerCAmelCase = image_size _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = crop_size def __lowerCAmelCase ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ): _lowerCAmelCase = VivitImageProcessingTester(self ) @property def __lowerCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ): # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for video in video_inputs: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
664
0
from math import isclose, sqrt def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str )->Tuple: _lowerCAmelCase = point_y / 4 / point_x _lowerCAmelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) _lowerCAmelCase = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) _lowerCAmelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 _lowerCAmelCase = outgoing_gradient**2 + 4 _lowerCAmelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) _lowerCAmelCase = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0 _lowerCAmelCase = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) _lowerCAmelCase = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point _lowerCAmelCase = x_minus if isclose(_lowerCamelCase , _lowerCamelCase ) else x_plus _lowerCAmelCase = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE : str = 1.4 , _SCREAMING_SNAKE_CASE : Optional[Any] = -9.6 )->Optional[Any]: _lowerCAmelCase = 0 _lowerCAmelCase = first_x_coord _lowerCAmelCase = first_y_coord _lowerCAmelCase = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): _lowerCAmelCase = next_point(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F"""{solution() = }""")
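Editor's note (sanity-check sketch, independent of the solution above): the reflection step in the first function rewrites mirroring an incoming slope about the ellipse normal y / (4x) via s = 2*m/(1 + m**2) and c = (1 - m**2)/(1 + m**2), the sine and cosine of twice the normal's angle. Reflecting twice with the standalone helper below returns the original slope, a quick check that the formula really is a mirror reflection.

def reflect(incoming: float, normal: float) -> float:
    # Reflect the slope `incoming` about a mirror line of slope `normal`.
    s = 2 * normal / (1 + normal * normal)
    c = (1 - normal * normal) / (1 + normal * normal)
    return (s - c * incoming) / (c + s * incoming)


print(reflect(reflect(0.5, 2.0), 2.0))  # ~0.5: reflecting twice is the identity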
716
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n" UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]: def remove_articles(_SCREAMING_SNAKE_CASE : List[str] ): _lowerCAmelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(_SCREAMING_SNAKE_CASE , ''' ''' , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ): _lowerCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE : Optional[int] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Any: return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )->int: _lowerCAmelCase = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 1_0_0 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , 
_SCREAMING_SNAKE_CASE : List[str] )->Optional[int]: _lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams] _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter() for sgram, scount in sgramcounter.items(): _lowerCAmelCase = scount * numref _lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = Counter() for cgram, ccount in cgramcounter.items(): _lowerCAmelCase = ccount * numref # KEEP _lowerCAmelCase = sgramcounter_rep & cgramcounter_rep _lowerCAmelCase = keepgramcounter_rep & rgramcounter _lowerCAmelCase = sgramcounter_rep & rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = keeptmpscorea / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _lowerCAmelCase = 0 if keepscore_precision > 0 or keepscore_recall > 0: _lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _lowerCAmelCase = sgramcounter_rep - cgramcounter_rep _lowerCAmelCase = delgramcounter_rep - rgramcounter _lowerCAmelCase = sgramcounter_rep - rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = deltmpscorea / len(_SCREAMING_SNAKE_CASE ) # ADDITION _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 if addscore_precision > 0 or addscore_recall > 0: _lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str )->List[Any]: _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = ssent.split(''' ''' ) _lowerCAmelCase = csent.split(''' ''' ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] for rsent in rsents: _lowerCAmelCase = rsent.split(''' ''' ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(_SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = sum([keepascore, keepascore, 
keepascore, keepascore] ) / 4 _lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4 _lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4 _lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : str = "13a" , _SCREAMING_SNAKE_CASE : bool = True )->int: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: _lowerCAmelCase = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE ) elif tokenizer == "moses": _lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE ) elif tokenizer == "penn": _lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = sentence if not return_str: _lowerCAmelCase = normalized_sent.split() return normalized_sent def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->str: if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _lowerCAmelCase = 0 for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] ) _lowerCAmelCase = sari_score / len(_SCREAMING_SNAKE_CASE ) return 1_0_0 * sari_score def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]="exp" , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=False , )->str: _lowerCAmelCase = len(references[0] ) if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _lowerCAmelCase = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )] _lowerCAmelCase = sacrebleu.corpus_bleu( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __lowerCAmelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 
'''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = {} result.update({'''sari''': compute_sari(sources=_lowerCAmelCase , predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) result.update({'''exact''': compute_em(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} ) return result
664
0
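# Illustrative usage sketch for the simplification metrics defined above, assuming
# the de-obfuscated helper names `compute_sari` and `compute_sacrebleu` that the
# metric class above refers to; the inputs are toy examples, not real data.
sources = ["About 95 species are currently accepted."]
predictions = ["About 95 you now get in."]
references = [
    ["About 95 species are currently known.", "About 95 species are now agreed."]
]

sari = compute_sari(sources=sources, predictions=predictions, references=references)
bleu = compute_sacrebleu(predictions=predictions, references=references)
print(f"SARI: {sari:.2f}  sacreBLEU: {bleu:.2f}")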
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCAmelCase : @property def __lowerCAmelCase ( self ): return self.get_dummy_input() @property def __lowerCAmelCase ( self ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def __lowerCAmelCase ( self , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ): _lowerCAmelCase = 4 _lowerCAmelCase = 32 _lowerCAmelCase = (32, 32) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = torch.device(__lowerCamelCase ) _lowerCAmelCase = (batch_size, num_channels) + sizes _lowerCAmelCase = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) _lowerCAmelCase = {"hidden_states": hidden_states} if include_temb: _lowerCAmelCase = 128 _lowerCAmelCase = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: _lowerCAmelCase = torch.manual_seed(1 ) _lowerCAmelCase = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: _lowerCAmelCase = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: _lowerCAmelCase = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def __lowerCAmelCase ( self ): _lowerCAmelCase = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": _lowerCAmelCase = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) _lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common() _lowerCAmelCase = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): _lowerCAmelCase = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): _lowerCAmelCase = output[0] self.assertEqual(output.shape , self.output_shape ) _lowerCAmelCase = output[0, -1, -3:, -3:] _lowerCAmelCase = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5E-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common() _lowerCAmelCase = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() _lowerCAmelCase = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): _lowerCAmelCase = output[0] _lowerCAmelCase = torch.device(__lowerCamelCase ) _lowerCAmelCase = randn_tensor(output.shape , device=__lowerCamelCase ) _lowerCAmelCase = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
717
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["DeiTFeatureExtractor"] UpperCAmelCase_ = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
0
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
718
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] )->Any: # noqa: E741 _lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = 0 _lowerCAmelCase = [0] * n _lowerCAmelCase = [False] * n _lowerCAmelCase = [False] * n def dfs(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ): if parent == root: out_edge_count += 1 _lowerCAmelCase = True _lowerCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: _lowerCAmelCase = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: _lowerCAmelCase = True # AP found via cycle if at == low[to]: _lowerCAmelCase = True else: _lowerCAmelCase = min(low[at] , _SCREAMING_SNAKE_CASE ) return out_edge_count for i in range(_SCREAMING_SNAKE_CASE ): if not visited[i]: _lowerCAmelCase = 0 _lowerCAmelCase = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , -1 , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = out_edge_count > 1 for x in range(len(_SCREAMING_SNAKE_CASE ) ): if is_art[x] is True: print(_SCREAMING_SNAKE_CASE ) # Adjacency list of graph UpperCAmelCase_ = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
664
0
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
719
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCAmelCase ( snake_case_ ): def __lowerCAmelCase ( self ): _lowerCAmelCase = SMALL_MODEL_IDENTIFIER _lowerCAmelCase = '''pt''' _lowerCAmelCase = '''tf''' def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase ) model_tf.save_pretrained(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = '''mock_framework''' # Framework provided - return whatever the user provides _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_lowerCAmelCase ) _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch( '''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_lowerCAmelCase , self.framework_pt ) # Both not in environment -> raise error _lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase ) _lowerCAmelCase = 
MagicMock(return_value=_lowerCAmelCase ) with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch( '''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ): with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
664
0
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
720
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( snake_case_ ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = DiTPipeline SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = False def __lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith('''mps''' ): _lowerCAmelCase = torch.manual_seed(_lowerCAmelCase ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) _lowerCAmelCase = { '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ): _lowerCAmelCase = '''cpu''' _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase ) _lowerCAmelCase = pipe(**_lowerCAmelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase , 1E-3 ) def __lowerCAmelCase ( self ): self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = 
pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCAmelCase ( self ): _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('''cuda''' ) _lowerCAmelCase = ['''vase''', '''umbrella'''] _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' F'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-1
664
0
from __future__ import annotations


def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(
    graph: list[dict[str, int]], distance: list[float], edge_count: int
) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
721
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase_ = {"UserAgent": UserAgent().random} def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->dict: _lowerCAmelCase = script.contents[0] _lowerCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCAmelCase : def __init__( self , _lowerCAmelCase ): _lowerCAmelCase = F'''https://www.instagram.com/{username}/''' _lowerCAmelCase = self.get_json() def __lowerCAmelCase ( self ): _lowerCAmelCase = requests.get(self.url , headers=_lowerCAmelCase ).text _lowerCAmelCase = BeautifulSoup(_lowerCAmelCase , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self ): return F'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self ): return F'''{self.fullname} ({self.username}) is {self.biography}''' @property def __lowerCAmelCase ( self ): return self.user_data["username"] @property def __lowerCAmelCase ( self ): return self.user_data["full_name"] @property def __lowerCAmelCase ( self ): return self.user_data["biography"] @property def __lowerCAmelCase ( self ): return self.user_data["business_email"] @property def __lowerCAmelCase ( self ): return self.user_data["external_url"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_followed_by"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_follow"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __lowerCAmelCase ( self ): return self.user_data["profile_pic_url_hd"] @property def __lowerCAmelCase ( self ): return self.user_data["is_verified"] @property def __lowerCAmelCase ( self ): return self.user_data["is_private"] def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions _lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = InstagramUser("github") print(instagram_user) print(F"""{instagram_user.number_of_posts = }""") print(F"""{instagram_user.number_of_followers = }""") print(F"""{instagram_user.number_of_followings = }""") print(F"""{instagram_user.email = }""") print(F"""{instagram_user.website = }""") print(F"""{instagram_user.profile_picture_url = }""") print(F"""{instagram_user.is_verified = }""") print(F"""{instagram_user.is_private = }""")
664
0
def method_a(boundary: list[float], steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
700
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
664
0
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
701
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict: # Initialise PyTorch model _lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) _lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
664
0
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase_ = logging.getLogger(__name__) def UpperCAmelCase__ ( )->Dict: _lowerCAmelCase = argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=_SCREAMING_SNAKE_CASE , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=_SCREAMING_SNAKE_CASE , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=_SCREAMING_SNAKE_CASE , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=_SCREAMING_SNAKE_CASE , default=1_0_0_0 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=_SCREAMING_SNAKE_CASE , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=_SCREAMING_SNAKE_CASE , default=5_1_2 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=_SCREAMING_SNAKE_CASE , help='''Output directory where the TFRecord shards will be saved. If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) _lowerCAmelCase = parser.parse_args() return args def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple )->Optional[Any]: def fn(_SCREAMING_SNAKE_CASE : int ): return tokenizer(examples['''text'''] ) return fn def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->int: _lowerCAmelCase = [] for i in range(len(tokenized_data['''input_ids'''] ) ): _lowerCAmelCase = { '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), '''attention_mask''': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } _lowerCAmelCase = tf.train.Features(feature=_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = tf.train.Example(features=_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = example.SerializeToString() records.append(_SCREAMING_SNAKE_CASE ) return records def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] )->List[Any]: _lowerCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _lowerCAmelCase = min(len(_SCREAMING_SNAKE_CASE ) , args.limit ) _lowerCAmelCase = dataset.select(range(_SCREAMING_SNAKE_CASE ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) _lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _lowerCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) else: _lowerCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. _lowerCAmelCase = tokenize_function(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = dataset.map(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(_SCREAMING_SNAKE_CASE : Any ): # Concatenate all texts. _lowerCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} _lowerCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _lowerCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _lowerCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , _SCREAMING_SNAKE_CASE , args.max_length )] for k, t in concatenated_examples.items() } return result _lowerCAmelCase = dataset_tokenized.map(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , batch_size=1_0_0_0 , num_proc=4 ) _lowerCAmelCase = 0 _lowerCAmelCase = 0 for shard in range(0 , len(_SCREAMING_SNAKE_CASE ) , args.shard_size ): _lowerCAmelCase = grouped_dataset[shard : shard + args.shard_size] _lowerCAmelCase = len(dataset_snapshot['''input_ids'''] ) _lowerCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _lowerCAmelCase = get_serialized_examples(_SCREAMING_SNAKE_CASE ) with tf.io.TFRecordWriter(_SCREAMING_SNAKE_CASE ) as out_file: for i in range(len(_SCREAMING_SNAKE_CASE ) ): _lowerCAmelCase = serialized_examples[i] out_file.write(_SCREAMING_SNAKE_CASE ) print('''Wrote file {} containing {} records'''.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f: print(f'''Total {args.split} records: {total_records}''' , file=_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = parse_args() main(args)
702
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = "Hello world! cécé herlolip" def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]: _lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout _lowerCAmelCase = roberta.model.encoder.sentence_encoder _lowerCAmelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings _lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight _lowerCAmelCase = roberta_sent_encoder.embed_positions.weight _lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight _lowerCAmelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _lowerCAmelCase = model.roberta.encoder.layer[i] _lowerCAmelCase = roberta_sent_encoder.layers[i] _lowerCAmelCase = layer.attention _lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight _lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias # self attention _lowerCAmelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _lowerCAmelCase = roberta_layer.self_attn.q_proj.weight _lowerCAmelCase = roberta_layer.self_attn.q_proj.bias _lowerCAmelCase = roberta_layer.self_attn.k_proj.weight _lowerCAmelCase = roberta_layer.self_attn.k_proj.bias _lowerCAmelCase = roberta_layer.self_attn.v_proj.weight _lowerCAmelCase = roberta_layer.self_attn.v_proj.bias # self-attention output _lowerCAmelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _lowerCAmelCase = roberta_layer.self_attn.out_proj.weight _lowerCAmelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _lowerCAmelCase = roberta_layer.final_layer_norm.weight _lowerCAmelCase = roberta_layer.final_layer_norm.bias # intermediate _lowerCAmelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # output _lowerCAmelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase = roberta_layer.fca.weight _lowerCAmelCase = roberta_layer.fca.bias # end of layer if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight _lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head _lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight _lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias _lowerCAmelCase = roberta.model.encoder.lm_head.weight _lowerCAmelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 _lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0] if classification_head: _lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) ) else: _lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) _lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 _lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) UpperCAmelCase_ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
664
0
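# Self-contained sketch of the fixed-length chunking performed by `group_texts`
# above: concatenate all tokenized samples, drop the remainder that does not fill
# a full block, and split the rest into `max_length`-sized chunks (toy values only).
def chunk_examples(examples: dict, max_length: int = 512) -> dict:
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    total_length = (total_length // max_length) * max_length  # drop the tail
    return {
        k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
        for k, t in concatenated.items()
    }


if __name__ == "__main__":
    toy_batch = {
        "input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9]],
        "attention_mask": [[1, 1, 1], [1, 1], [1, 1, 1, 1]],
    }
    print(chunk_examples(toy_batch, max_length=4))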
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class UpperCAmelCase ( unittest.TestCase ): def __lowerCAmelCase ( self ): _lowerCAmelCase = 10 def __lowerCAmelCase ( self ): _lowerCAmelCase = [1, 2, 3, 4] _lowerCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A ) def __lowerCAmelCase ( self ): _lowerCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." _lowerCAmelCase = process_story(__A ) self.assertEqual(__A , [] ) def __lowerCAmelCase ( self ): _lowerCAmelCase = "" _lowerCAmelCase = process_story(__A ) self.assertEqual(__A , [] ) self.assertEqual(__A , [] ) def __lowerCAmelCase ( self ): _lowerCAmelCase = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) _lowerCAmelCase = process_story(__A ) _lowerCAmelCase = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(__A , __A ) _lowerCAmelCase = ["It was the best of times."] self.assertEqual(__A , __A ) def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.tensor([1, 2, 3, 4] ) _lowerCAmelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__A , 0 ).numpy() , expected.numpy() ) def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__A , 23 ).numpy() , expected.numpy() ) def __lowerCAmelCase ( self ): _lowerCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__A , 1 ).numpy() , expected.numpy() ) def __lowerCAmelCase ( self ): _lowerCAmelCase = 101 _lowerCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _lowerCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _lowerCAmelCase = compute_token_type_ids(__A , __A ) np.testing.assert_array_equal(__A , __A )
703
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class UpperCAmelCase ( snake_case_ ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _lowerCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _lowerCAmelCase = i / num_diffusion_timesteps _lowerCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class UpperCAmelCase ( snake_case_ ,snake_case_ ): SCREAMING_SNAKE_CASE__ = 1 @register_to_config def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ): if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None: _lowerCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase ) _lowerCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowerCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _lowerCAmelCase = 1.0 - self.betas _lowerCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _lowerCAmelCase = 1.0 # setable values _lowerCAmelCase = None _lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): return sample def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' F''' maximal {self.config.num_train_timesteps} timesteps.''' ) _lowerCAmelCase = num_inference_steps _lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa ) _lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase ) self.timesteps += self.config.steps_offset def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ): # 1. get previous step value (=t+1) _lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _lowerCAmelCase = self.alphas_cumprod[timestep] _lowerCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _lowerCAmelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _lowerCAmelCase = model_output elif self.config.prediction_type == "sample": _lowerCAmelCase = model_output _lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _lowerCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def __len__( self ): return self.config.num_train_timesteps
664
0
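# Numerical sketch of the deterministic inverted-DDIM update that `step` above
# implements for the "epsilon" prediction type (no clipping, no added noise);
# the alpha values and tensors below are illustrative placeholders only.
import torch

alpha_prod_t = torch.tensor(0.90)       # cumulative alpha at the current timestep
alpha_prod_t_next = torch.tensor(0.80)  # cumulative alpha at the next (t + 1) timestep
sample = torch.randn(1, 4, 8, 8)        # latent x_t
model_output = torch.randn(1, 4, 8, 8)  # predicted noise eps_theta(x_t, t)

beta_prod_t = 1 - alpha_prod_t
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_epsilon = model_output
pred_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * pred_epsilon
next_sample = alpha_prod_t_next**0.5 * pred_original_sample + pred_sample_direction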
import functools from typing import Any def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : list[str] )->str: if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError('''the string should be not empty string''' ) if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not all( isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0 for item in words ): raise ValueError('''the words should be a list of non-empty strings''' ) # Build trie _lowerCAmelCase = {} _lowerCAmelCase = "WORD_KEEPER" for word in words: _lowerCAmelCase = trie for c in word: if c not in trie_node: _lowerCAmelCase = {} _lowerCAmelCase = trie_node[c] _lowerCAmelCase = True _lowerCAmelCase = len(_lowerCamelCase ) # Dynamic programming method @functools.cache def is_breakable(_SCREAMING_SNAKE_CASE : int ) -> bool: if index == len_string: return True _lowerCAmelCase = trie for i in range(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase = trie_node.get(string[i] , _lowerCamelCase ) if trie_node is None: return False if trie_node.get(_lowerCamelCase , _lowerCamelCase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
704
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase_ = { "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"], "tokenization_cpmant": ["CpmAntTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", "CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
664
0
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope def __lowerCAmelCase ( self ): _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.get_config() _lowerCAmelCase = 300 return config def __lowerCAmelCase ( self ): ( _lowerCAmelCase ) = self.prepare_config_and_inputs() _lowerCAmelCase = True _lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCAmelCase = ids_tensor([self.batch_size, 
self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraModel(config=_a ) model.to(_a ) model.eval() _lowerCAmelCase = model(_a , attention_mask=_a , token_type_ids=_a ) _lowerCAmelCase = model(_a , token_type_ids=_a ) _lowerCAmelCase = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowerCAmelCase = True _lowerCAmelCase = MraModel(_a ) model.to(_a ) model.eval() _lowerCAmelCase = model( _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , ) _lowerCAmelCase = model( _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , ) _lowerCAmelCase = model(_a , attention_mask=_a , token_type_ids=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForMaskedLM(config=_a ) model.to(_a ) model.eval() _lowerCAmelCase = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = MraForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _lowerCAmelCase = model( _a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForSequenceClassification(_a ) model.to(_a ) model.eval() _lowerCAmelCase = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_labels _lowerCAmelCase = MraForTokenClassification(config=_a ) model.to(_a ) model.eval() _lowerCAmelCase = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self.num_choices _lowerCAmelCase = MraForMultipleChoice(config=_a ) model.to(_a ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = 
token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.prepare_config_and_inputs() ( _lowerCAmelCase ) = config_and_inputs _lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ,unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = () def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_a , hidden_size=37 ) def __lowerCAmelCase ( self ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase = type self.model_tester.create_and_check_model(*_a ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_a ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_a ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_a ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_a ) def __lowerCAmelCase ( self ): _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_a ) @slow def __lowerCAmelCase ( self ): for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = MraModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @unittest.skip(reason='''MRA does not output attentions''' ) def __lowerCAmelCase ( self ): return @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_a )[0] _lowerCAmelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _a ) _lowerCAmelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_a )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 256, vocab_size) ) 
self.assertEqual(output.shape , _a ) _lowerCAmelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): _lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) _lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): _lowerCAmelCase = model(_a )[0] _lowerCAmelCase = 50_265 _lowerCAmelCase = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , _a ) _lowerCAmelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
705
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # merge the audio features into the text encoding when both modalities are given
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
664
0
import numpy as np

import datasets


_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"

_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"

_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
706
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    # place each element in the bucket for its value range
    for i in my_list:
        buckets[int(i - min_value)].append(i)

    # sort each bucket individually and concatenate
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
664
0
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            "Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bars are not supported with the joblib backend, so disable_tqdm and desc are unused here
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
707
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
664
0
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image UpperCAmelCase_ = ["text", "image", "audio"] def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->List[Any]: _lowerCAmelCase = [] for input_type in input_types: if input_type == "text": inputs.append('''Text input''' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((5_1_2, 5_1_2) ) ) elif input_type == "audio": inputs.append(torch.ones(3_0_0_0 ) ) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): inputs.append(create_inputs(UpperCAmelCase__ ) ) else: raise ValueError(f'''Invalid type requested: {input_type}''' ) return inputs def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List )->List[Any]: _lowerCAmelCase = [] for output in outputs: if isinstance(UpperCAmelCase__ , (str, AgentText) ): output_types.append('''text''' ) elif isinstance(UpperCAmelCase__ , (Image.Image, AgentImage) ): output_types.append('''image''' ) elif isinstance(UpperCAmelCase__ , (torch.Tensor, AgentAudio) ): output_types.append('''audio''' ) else: raise ValueError(f'''Invalid output: {output}''' ) return output_types @is_tool_test class UpperCAmelCase : def __lowerCAmelCase ( self ): self.assertTrue(hasattr(self.tool , '''inputs''' ) ) self.assertTrue(hasattr(self.tool , '''outputs''' ) ) _lowerCAmelCase = self.tool.inputs for _input in inputs: if isinstance(_input , lowercase_ ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) _lowerCAmelCase = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def __lowerCAmelCase ( self ): _lowerCAmelCase = create_inputs(self.tool.inputs ) _lowerCAmelCase = self.tool(*lowercase_ ) # There is a single output if len(self.tool.outputs ) == 1: _lowerCAmelCase = [outputs] self.assertListEqual(output_types(lowercase_ ) , self.tool.outputs ) def __lowerCAmelCase ( self ): self.assertTrue(hasattr(self.tool , '''description''' ) ) self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) ) self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = create_inputs(self.tool.inputs ) _lowerCAmelCase = self.tool(*lowercase_ ) if not isinstance(lowercase_ , lowercase_ ): _lowerCAmelCase = [outputs] self.assertEqual(len(lowercase_ ) , len(self.tool.outputs ) ) for output, output_type in zip(lowercase_ , self.tool.outputs ): _lowerCAmelCase = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(lowercase_ , lowercase_ ) ) def __lowerCAmelCase ( self ): _lowerCAmelCase = create_inputs(self.tool.inputs ) _lowerCAmelCase = [] for _input, input_type in zip(lowercase_ , self.tool.inputs ): if isinstance(lowercase_ , lowercase_ ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error _lowerCAmelCase = self.tool(*lowercase_ ) if not isinstance(lowercase_ , lowercase_ ): _lowerCAmelCase = [outputs] self.assertEqual(len(lowercase_ ) , len(self.tool.outputs ) )
708
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex UpperCAmelCase_ = 1_0 UpperCAmelCase_ = 2_5_6 def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]: if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS: return None _lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE ) for token in set(_SCREAMING_SNAKE_CASE ): min_hash.update(token.encode() ) return min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]: return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0} class UpperCAmelCase : def __init__( self , *, _lowerCAmelCase = 0.85 , ): _lowerCAmelCase = duplication_jaccard_threshold _lowerCAmelCase = NUM_PERM _lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _lowerCAmelCase = defaultdict(_lowerCAmelCase ) def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = self._index.query(_lowerCAmelCase ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(_lowerCAmelCase , _lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase ) def __lowerCAmelCase ( self ): _lowerCAmelCase = [] for base, duplicates in self._duplicate_clusters.items(): _lowerCAmelCase = [base] + list(_lowerCAmelCase ) # reformat the cluster to be a list of dict _lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(_lowerCAmelCase ) return duplicate_clusters def __lowerCAmelCase ( self , _lowerCAmelCase ): _lowerCAmelCase = self.get_duplicate_clusters() with open(_lowerCAmelCase , '''w''' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = element _lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str: _lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ): di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float: _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) _lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) UpperCAmelCase_ = None def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]: _lowerCAmelCase = [] for elementa in cluster: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: _lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold: elementa["copies"] += 1 break else: _lowerCAmelCase = 1 extremes.append(_SCREAMING_SNAKE_CASE ) return extremes def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple: global _shared_dataset _lowerCAmelCase = dataset _lowerCAmelCase = [] _lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ): extremes_list.append(_SCREAMING_SNAKE_CASE ) return extremes_list def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]: _lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} _lowerCAmelCase = {} _lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for extremes in extremes_clusters: for element in extremes: _lowerCAmelCase = element _lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() ) _lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _lowerCAmelCase = element['''base_index'''] in extreme_dict if element["is_extreme"]: _lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies'''] print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' ) print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' ) return ds_filter, duplicate_clusters
664
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") UpperCAmelCase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) UpperCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->str: with open(_lowercase , '''rb''' ) as f: _lowerCAmelCase = Image.open(_lowercase ) return im.convert('''RGB''' ) @dataclass class UpperCAmelCase : SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={ '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).''' } ,) SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE__ = field(default=a__ ,metadata={'''help''': '''A folder containing the training data.'''} ) SCREAMING_SNAKE_CASE__ = field(default=a__ ,metadata={'''help''': '''A folder containing the validation data.'''} ) SCREAMING_SNAKE_CASE__ = field( default=0.15 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def __lowerCAmelCase ( self ): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( '''You must specify either a dataset name from the hub or a train and/or validation directory.''' ) @dataclass class UpperCAmelCase : SCREAMING_SNAKE_CASE__ = field( default='''google/vit-base-patch16-224-in21k''' ,metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ,) SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(a__ )} ,) SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) SCREAMING_SNAKE_CASE__ = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) SCREAMING_SNAKE_CASE__ = field(default=a__ 
,metadata={'''help''': '''Name or path of preprocessor config.'''} ) SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) SCREAMING_SNAKE_CASE__ = field( default=a__ ,metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} ,) def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple )->Dict: _lowerCAmelCase = torch.stack([example['''pixel_values'''] for example in examples] ) _lowerCAmelCase = torch.tensor([example['''labels'''] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def UpperCAmelCase__ ( )->Any: _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_image_classification''' , _lowercase , _lowercase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(_lowercase ) transformers.utils.logging.set_verbosity(_lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. 
if data_args.dataset_name is not None: _lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , ) else: _lowerCAmelCase = {} if data_args.train_dir is not None: _lowerCAmelCase = os.path.join(data_args.train_dir , '''**''' ) if data_args.validation_dir is not None: _lowerCAmelCase = os.path.join(data_args.validation_dir , '''**''' ) _lowerCAmelCase = load_dataset( '''imagefolder''' , data_files=_lowercase , cache_dir=model_args.cache_dir , task='''image-classification''' , ) # If we don't have a validation split, split off a percentage of train as validation. _lowerCAmelCase = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _lowercase ) and data_args.train_val_split > 0.0: _lowerCAmelCase = dataset["train"].train_test_split(data_args.train_val_split ) _lowerCAmelCase = split["train"] _lowerCAmelCase = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _lowerCAmelCase = dataset["train"].features["labels"].names _lowerCAmelCase = {}, {} for i, label in enumerate(_lowercase ): _lowerCAmelCase = str(_lowercase ) _lowerCAmelCase = label # Load the accuracy metric from the datasets package _lowerCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_SCREAMING_SNAKE_CASE : Optional[int] ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) _lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowercase ) , labelaid=_lowercase , idalabel=_lowercase , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _lowerCAmelCase = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) _lowerCAmelCase = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: _lowerCAmelCase = image_processor.size["shortest_edge"] else: _lowerCAmelCase = (image_processor.size["height"], image_processor.size["width"]) _lowerCAmelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) _lowerCAmelCase = Compose( [ RandomResizedCrop(_lowercase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) _lowerCAmelCase = Compose( [ Resize(_lowercase ), CenterCrop(_lowercase ), ToTensor(), normalize, ] ) def train_transforms(_SCREAMING_SNAKE_CASE : List[str] ): _lowerCAmelCase = [ _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch["image"] ] return example_batch def val_transforms(_SCREAMING_SNAKE_CASE : List[Any] ): _lowerCAmelCase = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch["image"]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: _lowerCAmelCase = ( dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(_lowercase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: _lowerCAmelCase = ( dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(_lowercase ) # Initalize our trainer _lowerCAmelCase = Trainer( model=_lowercase , args=_lowercase , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=_lowercase , tokenizer=_lowercase , data_collator=_lowercase , ) # Training if training_args.do_train: _lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: _lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCAmelCase = last_checkpoint _lowerCAmelCase = trainer.train(resume_from_checkpoint=_lowercase ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _lowercase ) trainer.save_metrics('''eval''' , _lowercase ) # Write model card and (optionally) push to hub _lowerCAmelCase = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**_lowercase ) else: trainer.create_model_card(**_lowercase ) if __name__ == "__main__": main()
709
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = process _lowerCAmelCase = params def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): _lowerCAmelCase = self.dataset[i] _lowerCAmelCase = self.process(_lowerCAmelCase , **self.params ) return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): _lowerCAmelCase = loader _lowerCAmelCase = infer _lowerCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _lowerCAmelCase = None _lowerCAmelCase = loader_batch_size # Internal bookkeeping _lowerCAmelCase = None _lowerCAmelCase = None def __len__( self ): return len(self.loader ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _lowerCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _lowerCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Convert ModelOutput to tuple first _lowerCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _lowerCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_lowerCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase ) self._loader_batch_index += 1 return result def __lowerCAmelCase ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _lowerCAmelCase = next(self.iterator ) _lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _lowerCAmelCase = processed _lowerCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __iter__( self ): _lowerCAmelCase = iter(self.loader ) _lowerCAmelCase = None return self def __lowerCAmelCase ( self ): if self.subiterator is None: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _lowerCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) _lowerCAmelCase = next(self.subiterator ) return processed class UpperCAmelCase ( snake_case_ ): def __iter__( self ): _lowerCAmelCase = iter(self.loader ) return self def __lowerCAmelCase ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
_lowerCAmelCase = False _lowerCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator while not is_last: _lowerCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_lowerCAmelCase , torch.Tensor ): _lowerCAmelCase = processed else: _lowerCAmelCase = list(processed.keys() )[0] _lowerCAmelCase = processed[key] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = len(_lowerCAmelCase ) else: _lowerCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _lowerCAmelCase = observed_batch_size _lowerCAmelCase = processed _lowerCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _lowerCAmelCase = self.loader_batch_item() _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) if is_last: return accumulator else: _lowerCAmelCase = processed _lowerCAmelCase = item.pop('''is_last''' ) accumulator.append(_lowerCAmelCase ) return accumulator class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = key def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return self.dataset[i][self.key] class UpperCAmelCase ( snake_case_ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = dataset _lowerCAmelCase = keya _lowerCAmelCase = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , _lowerCAmelCase ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
664
0