Dataset schema (one row per sample):
    code                     string, length 87 to 55.2k
    code_codestyle           int64, 0 to 349
    style_context            string, length 135 to 49.1k
    style_context_codestyle  int64, 0 to 349
    label                    int64, 0 or 1
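Each row below pairs a `code` sample with a `style_context` sample and a binary `label`. Assuming the dump was produced from a Hugging Face `datasets` dataset, a minimal sketch of loading and inspecting one row follows; the dataset ID is a placeholder, not a real repository:

# Minimal sketch: load a dataset with the schema above and inspect one row.
# "your-org/codestyle-pairs" is a placeholder ID, not a real dataset.
from datasets import load_dataset

ds = load_dataset("your-org/codestyle-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample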
'''simple docstring''' from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __lowercase : Dict = logging.get_logger(__name__) __lowercase : Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} __lowercase : List[str] = { 'vocab_file': { 'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json' }, 'merges_file': { 'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt' }, } __lowercase : Optional[Any] = {'allegro/herbert-base-cased': 5_14} __lowercase : str = {} class __UpperCamelCase ( lowerCAmelCase_ ): A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_INIT_CONFIGURATION A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = HerbertTokenizer def __init__( self , __a=None , __a=None , __a=None , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a="</s>" , **__a , ): '''simple docstring''' super().__init__( __a , __a , tokenizer_file=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , sep_token=__a , **__a , ) def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' __a : Tuple = [self.cls_token_id] __a : str = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCAmelCase ( self , __a , __a = None , __a = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) if token_ids_a is None: return [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1] def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' __a : Union[str, Any] = [self.sep_token_id] __a : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' __a : Optional[int] = self._tokenizer.model.save(__a , name=__a ) return tuple(__a )
code_codestyle: 27
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowercase : Tuple = pytest.mark.integration __lowercase : Optional[int] = {'comet'} __lowercase : List[str] = importlib.util.find_spec('fairseq') is not None __lowercase : str = {'code_eval'} __lowercase : List[Any] = os.name == 'nt' __lowercase : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'} __lowercase : Optional[Any] = importlib.util.find_spec('transformers') is not None def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : int , _SCREAMING_SNAKE_CASE : List[Any] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (): __a : List[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @local class __UpperCamelCase ( parameterized.TestCase ): A_ = {} A_ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = '[...]' __a : Tuple = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) __a : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__a ) # check parameters __a : Dict = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__a , metric_module.__name__ ): with self.use_local_metrics(): try: __a : str = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = '[...]' __a : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) # run doctest with self.use_local_metrics(): __a : List[Any] = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) 
self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ): yield else: yield @contextmanager def __UpperCAmelCase ( self ): '''simple docstring''' def load_local_metric(__a , *__a , **__a ): return load_metric(os.path.join('metrics' , __a ) , *__a , **__a ) with patch('datasets.load_metric' ) as mock_load_metric: __a : Dict = load_local_metric yield @classmethod def __UpperCAmelCase ( cls , __a ): '''simple docstring''' def wrapper(__a ): __a : Optional[Any] = contextmanager(__a ) __a : str = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: __a : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): import torch def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Optional[int] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: __a : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Optional[int] ): class __UpperCamelCase : def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' assert len(__a ) == 2 __a : Dict = [0.19, 0.92] return scores, sum(__a ) / len(__a ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: __a : str = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: __a : int = load_from_checkpoint yield def lowerCamelCase (): __a : Optional[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) ) __a : List[str] = 'ERROR' __a : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
style_context_codestyle: 27
label: 1
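The `code` field of the row above implements the standard single-sequence / sequence-pair special-token layout for a BERT-style fast tokenizer. A minimal deobfuscated sketch of that logic, using plain functions with placeholder token ids rather than the real tokenizer API:

# Deobfuscated sketch of the special-token logic in the tokenizer above.
# cls_id/sep_id are placeholder values; the real ids come from the vocab.
from typing import List, Optional

def build_inputs_with_special_tokens(
    ids_a: List[int], ids_b: Optional[List[int]] = None,
    cls_id: int = 0, sep_id: int = 2,
) -> List[int]:
    cls, sep = [cls_id], [sep_id]
    if ids_b is None:
        return cls + ids_a + sep                # <s> A </s>
    return cls + ids_a + sep + ids_b + sep      # <s> A </s> B </s>

def create_token_type_ids(
    ids_a: List[int], ids_b: Optional[List[int]] = None,
    cls_id: int = 0, sep_id: int = 2,
) -> List[int]:
    cls, sep = [cls_id], [sep_id]
    if ids_b is None:
        return len(cls + ids_a + sep) * [0]     # all segment 0
    return len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]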
'''simple docstring''' import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str]=[] ): __a : Any = size[0] - overlap_pixels * 2 __a : List[Any] = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels __a : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 __a : Tuple = np.pad(_SCREAMING_SNAKE_CASE , mode='linear_ramp' , pad_width=_SCREAMING_SNAKE_CASE , end_values=0 ) if "l" in remove_borders: __a : str = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: __a : int = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: __a : Optional[Any] = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: __a : List[str] = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any ): return max(_SCREAMING_SNAKE_CASE , min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : [int] , _SCREAMING_SNAKE_CASE : [int] , _SCREAMING_SNAKE_CASE : [int] ): return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : [int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : [int] ): __a : int = list(_SCREAMING_SNAKE_CASE ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap __a : str = clamp_rect(_SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] ) return rect def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any ): __a : Dict = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(_SCREAMING_SNAKE_CASE , (original_slice, 0) ) return result def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ): __a : Tuple = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) __a : Any = tile.crop(_SCREAMING_SNAKE_CASE ) return tile def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : int = n % d return n - divisor class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a , __a , __a , __a , __a , __a = 350 , ): '''simple docstring''' super().__init__( vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , low_res_scheduler=__a , scheduler=__a , max_noise_level=__a , ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , **__a ): '''simple docstring''' torch.manual_seed(0 ) __a : Tuple = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) 
, y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) __a : List[Any] = add_overlap_rect(__a , __a , image.size ) __a : Tuple = image.crop(__a ) __a : Union[str, Any] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] __a : Tuple = translated_slice_x - (original_image_slice / 2) __a : Optional[Any] = max(0 , __a ) __a : List[Any] = squeeze_tile(__a , __a , __a , __a ) __a : int = to_input.size __a : Union[str, Any] = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) __a : str = super(__a , self ).__call__(image=__a , **__a ).images[0] __a : List[Any] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) __a : Tuple = unsqueeze_tile(__a , __a ) __a : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) __a : Dict = [] if x == 0: remove_borders.append('l' ) elif crop_rect[2] == image.size[0]: remove_borders.append('r' ) if y == 0: remove_borders.append('t' ) elif crop_rect[3] == image.size[1]: remove_borders.append('b' ) __a : Dict = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__a ) , mode='L' , ) final_image.paste( __a , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __a ) @torch.no_grad() def __call__( self , __a , __a , __a = 75 , __a = 9.0 , __a = 50 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = None , __a = 1 , __a = 128 , __a = 32 , __a = 32 , ): '''simple docstring''' __a : Tuple = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) ) __a : Dict = math.ceil(image.size[0] / tile_size ) __a : Optional[int] = math.ceil(image.size[1] / tile_size ) __a : int = tcx * tcy __a : List[str] = 0 for y in range(__a ): for x in range(__a ): self._process_tile( __a , __a , __a , __a , __a , __a , __a , prompt=__a , num_inference_steps=__a , guidance_scale=__a , noise_level=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , ) current_count += 1 if callback is not None: callback({'progress': current_count / total_tile_count, 'image': final_image} ) return final_image def lowerCamelCase (): # Run a demo __a : Any = 'stabilityai/stable-diffusion-x4-upscaler' __a : int = StableDiffusionTiledUpscalePipeline.from_pretrained(_SCREAMING_SNAKE_CASE , revision='fp16' , torch_dtype=torch.floataa ) __a : Union[str, Any] = pipe.to('cuda' ) __a : Union[str, Any] = Image.open('../../docs/source/imgs/diffusers_library.jpg' ) def callback(_SCREAMING_SNAKE_CASE : str ): print(F"""progress: {obj["progress"]:.4f}""" ) obj["image"].save('diffusers_library_progress.jpg' ) __a : Dict = pipe(image=_SCREAMING_SNAKE_CASE , prompt='Black font, white background, vector' , noise_level=40 , callback=_SCREAMING_SNAKE_CASE ) final_image.save('diffusers_library.jpg' ) if __name__ == "__main__": main()
code_codestyle: 27
'''simple docstring''' import re import string import numpy as np import datasets __lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __lowercase : Any = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def 
__UpperCAmelCase ( self , __a , __a , __a=None , __a=False , __a=False , __a=False , ): '''simple docstring''' if regexes_to_ignore is not None: for s in regexes_to_ignore: __a : Tuple = np.array([re.sub(__a , '' , __a ) for x in predictions] ) __a : List[Any] = np.array([re.sub(__a , '' , __a ) for x in references] ) else: __a : int = np.asarray(__a ) __a : str = np.asarray(__a ) if ignore_case: __a : Dict = np.char.lower(__a ) __a : List[str] = np.char.lower(__a ) if ignore_punctuation: __a : Dict = string.punctuation.maketrans('' , '' , string.punctuation ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Dict = np.char.translate(__a , table=__a ) if ignore_numbers: __a : Optional[int] = string.digits.maketrans('' , '' , string.digits ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Optional[int] = np.char.translate(__a , table=__a ) __a : Any = predictions == references return {"exact_match": np.mean(__a ) * 100}
style_context_codestyle: 27
label: 1
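The `style_context` field of the row above computes exact-match accuracy with numpy after a chain of optional normalizations. A self-contained sketch of the same pipeline; the function name is mine:

# Minimal sketch of the exact-match computation above: optional regex
# removal, case folding, punctuation/digit stripping, then mean equality.
import re
import string
import numpy as np

def exact_match(preds, refs, regexes_to_ignore=None, ignore_case=False,
                ignore_punctuation=False, ignore_numbers=False):
    preds = np.asarray(preds, dtype=str)
    refs = np.asarray(refs, dtype=str)
    for pattern in regexes_to_ignore or []:
        preds = np.array([re.sub(pattern, "", x) for x in preds])
        refs = np.array([re.sub(pattern, "", x) for x in refs])
    if ignore_case:
        preds, refs = np.char.lower(preds), np.char.lower(refs)
    if ignore_punctuation:
        table = str.maketrans("", "", string.punctuation)
        preds = np.array([x.translate(table) for x in preds])
        refs = np.array([x.translate(table) for x in refs])
    if ignore_numbers:
        table = str.maketrans("", "", string.digits)
        preds = np.array([x.translate(table) for x in preds])
        refs = np.array([x.translate(table) for x in refs])
    return float(np.mean(preds == refs) * 100)

# matches the docstring example above: only "theater" is an exact match
assert exact_match(["cat?", "theater", "yelling", "agent"],
                   ["the cat", "theater", "YELLING", "agent007"]) == 25.0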
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any=None ): __a : Tuple = None if token is not None: __a : int = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""} __a : Dict = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" __a : List[str] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json() __a : Any = {} try: job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) __a : Optional[int] = math.ceil((result['total_count'] - 100) / 100 ) for i in range(_SCREAMING_SNAKE_CASE ): __a : int = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json() job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) return job_links except Exception: print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int=None ): __a : List[str] = None if token is not None: __a : Union[str, Any] = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""} __a : Union[str, Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" __a : Dict = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json() __a : Union[str, Any] = {} try: artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) __a : int = math.ceil((result['total_count'] - 100) / 100 ) for i in range(_SCREAMING_SNAKE_CASE ): __a : Any = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json() artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) return artifacts except Exception: print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple ): __a : List[Any] = None if token is not None: __a : int = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""} __a : str = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE ) __a : Optional[int] = result.headers['Location'] __a : Optional[Any] = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE ) __a : List[str] = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" ) with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fp: fp.write(response.content ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): __a : List[str] = [] __a : int = [] __a : int = None with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z: for filename in z.namelist(): if not os.path.isdir(_SCREAMING_SNAKE_CASE ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(_SCREAMING_SNAKE_CASE ) as f: for line in f: __a : List[Any] = line.decode('UTF-8' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs __a : Tuple = line[: line.index(': ' )] __a : Union[str, Any] = line[line.index(': ' ) + len(': ' ) :] 
errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('FAILED ' ): # `test` is the test method that failed __a : Tuple = line[len('FAILED ' ) :] failed_tests.append(_SCREAMING_SNAKE_CASE ) elif filename == "job_name.txt": __a : List[str] = line if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` """ F"""and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some""" ' problem.' ) __a : Optional[Any] = None if job_name and job_links: __a : Tuple = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # A list with elements of the form (line of error, error, failed test) __a : str = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] return result def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int=None ): __a : Optional[int] = [] __a : str = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )] for p in paths: errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) ) return errors def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any=None ): __a : List[Any] = Counter() counter.update([x[1] for x in logs] ) __a : Dict = counter.most_common() __a : str = {} for error, count in counts: if error_filter is None or error not in error_filter: __a : Union[str, Any] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]} __a : List[str] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) ) return r def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): __a : Any = test.split('::' )[0] if test.startswith('tests/models/' ): __a : Tuple = test.split('/' )[2] else: __a : Union[str, Any] = None return test def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): __a : Optional[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs] __a : Dict = [x for x in logs if x[2] is not None] __a : Any = {x[2] for x in logs} __a : Optional[int] = {} for test in tests: __a : Optional[Any] = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) __a : int = counter.most_common() __a : Dict = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} __a : List[str] = sum(error_counts.values() ) if n_errors > 0: __a : Optional[Any] = {'count': n_errors, 'errors': error_counts} __a : Optional[Any] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) ) return r def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : Any = '| no. | error | status |' __a : Optional[int] = '|-:|:-|:-|' __a : Any = [header, sep] for error in reduced_by_error: __a : Union[str, Any] = reduced_by_error[error]['count'] __a : Union[str, Any] = F"""| {count} | {error[:100]} | |""" lines.append(_SCREAMING_SNAKE_CASE ) return "\n".join(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): __a : Union[str, Any] = '| model | no. 
of errors | major error | count |' __a : Union[str, Any] = '|-:|-:|-:|-:|' __a : Any = [header, sep] for model in reduced_by_model: __a : Optional[int] = reduced_by_model[model]['count'] __a , __a : Any = list(reduced_by_model[model]['errors'].items() )[0] __a : Optional[int] = F"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(_SCREAMING_SNAKE_CASE ) return "\n".join(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') __lowercase : Union[str, Any] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __lowercase : Optional[Any] = get_job_links(args.workflow_run_id, token=args.token) __lowercase : str = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: __lowercase : List[Any] = k.find(' / ') __lowercase : Tuple = k[index + len(' / ') :] __lowercase : Union[str, Any] = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __lowercase : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __lowercase : Union[str, Any] = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __lowercase : Union[str, Any] = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __lowercase : str = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __lowercase : str = reduce_by_error(errors) __lowercase : int = reduce_by_model(errors) __lowercase : Optional[int] = make_github_table(reduced_by_error) __lowercase : Tuple = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
code_codestyle: 27
'''simple docstring''' import os import sys __lowercase : List[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowercase : int = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[Any] ): return AutoConfig.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoTokenizer.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any ): return AutoTokenizer.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModel.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Union[str, Any] ): return AutoModel.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[int] ): return AutoModelForCausalLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[Any] ): return AutoModelForMaskedLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Any ): return AutoModelForSequenceClassification.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : List[str] ): return AutoModelForQuestionAnswering.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
style_context_codestyle: 27
label: 1
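The `code` field of the row above pages through the GitHub Actions REST API 100 jobs at a time. A condensed sketch of that pagination pattern; the endpoint is GitHub's, while the run id and token are placeholders supplied by the caller:

# Sketch of the paginated job-link fetch above: page 1 is fetched first,
# then total_count tells us how many extra pages to request explicitly.
import math
from typing import Optional

import requests

def fetch_job_links(workflow_run_id: str, token: Optional[str] = None) -> dict:
    # An authenticated request gets a higher rate limit; the token is optional.
    headers = {"Accept": "application/vnd.github+json"}
    if token is not None:
        headers["Authorization"] = f"Bearer {token}"
    url = (f"https://api.github.com/repos/huggingface/transformers"
           f"/actions/runs/{workflow_run_id}/jobs?per_page=100")
    result = requests.get(url, headers=headers).json()
    links = {job["name"]: job["html_url"] for job in result["jobs"]}
    for i in range(math.ceil((result["total_count"] - 100) / 100)):
        page = requests.get(f"{url}&page={i + 2}", headers=headers).json()
        links.update({job["name"]: job["html_url"] for job in page["jobs"]})
    return links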
'''simple docstring''' import numpy as np import qiskit def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 8 , _SCREAMING_SNAKE_CASE : int | None = None ): __a : int = np.random.default_rng(seed=_SCREAMING_SNAKE_CASE ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. __a : Dict = 6 * key_len # Measurement basis for Alice's qubits. __a : str = rng.integers(2 , size=_SCREAMING_SNAKE_CASE ) # The set of states Alice will prepare. __a : Dict = rng.integers(2 , size=_SCREAMING_SNAKE_CASE ) # Measurement basis for Bob's qubits. __a : List[str] = rng.integers(2 , size=_SCREAMING_SNAKE_CASE ) # Quantum Circuit to simulate BB84 __a : Union[str, Any] = qiskit.QuantumCircuit(_SCREAMING_SNAKE_CASE , name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(_SCREAMING_SNAKE_CASE ): if alice_state[index] == 1: bbaa_circ.x(_SCREAMING_SNAKE_CASE ) if alice_basis[index] == 1: bbaa_circ.h(_SCREAMING_SNAKE_CASE ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(_SCREAMING_SNAKE_CASE ): if bob_basis[index] == 1: bbaa_circ.h(_SCREAMING_SNAKE_CASE ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. __a : str = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. __a : List[Any] = qiskit.execute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shots=1 , seed_simulator=_SCREAMING_SNAKE_CASE ) # Returns the result of measurement. __a : Tuple = job.result().get_counts(_SCREAMING_SNAKE_CASE ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. __a : int = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. __a : Any = gen_key[:key_len] if len(_SCREAMING_SNAKE_CASE ) >= key_len else gen_key.ljust(_SCREAMING_SNAKE_CASE , '0' ) return key if __name__ == "__main__": print(f'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
code_codestyle: 27
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = inspect.getfile(accelerate.test_utils ) __a : List[str] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 __a : Union[str, Any] = test_metrics @require_cpu def __UpperCAmelCase ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def __UpperCAmelCase ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def __UpperCAmelCase ( self ): '''simple docstring''' self.test_metrics.main() @require_multi_gpu def __UpperCAmelCase ( self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__a , env=os.environ.copy() )
style_context_codestyle: 27
label: 1
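The BB84 sample in the row above keeps a measured bit only where Alice's and Bob's randomly chosen bases agree, then pads or truncates to the requested key length. A classical sketch of that sifting step, using numpy only and no quantum simulator:

# Sketch of BB84 key sifting: keep bits where the two bases match,
# then pad with "0" if too short, otherwise truncate.
import numpy as np

def sift_key(alice_basis, bob_basis, measured_bits: str, key_len: int) -> str:
    gen_key = "".join(
        bit for a, b, bit in zip(alice_basis, bob_basis, measured_bits) if a == b
    )
    return gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

rng = np.random.default_rng(seed=0)
n = 48  # roughly 6x the key length, as in the sample above
alice, bob = rng.integers(2, size=n), rng.integers(2, size=n)
bits = "".join(map(str, rng.integers(2, size=n)))
print(sift_key(alice, bob, bits, key_len=8))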
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowercase : Dict = logging.get_logger(__name__) __lowercase : Any = {'vocab_file': 'spiece.model'} __lowercase : List[Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } __lowercase : Tuple = { 'google/bigbird-roberta-base': 40_96, 'google/bigbird-roberta-large': 40_96, 'google/bigbird-base-trivia-itc': 40_96, } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ["input_ids", "attention_mask"] A_ = [] def __init__( self , __a , __a="<unk>" , __a="<s>" , __a="</s>" , __a="<pad>" , __a="[SEP]" , __a="[MASK]" , __a="[CLS]" , __a = None , **__a , ): '''simple docstring''' __a : int = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token __a : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token __a : Dict = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token __a : Optional[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token __a : Optional[int] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token __a : List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __a : List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token __a : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , sep_token=__a , mask_token=__a , cls_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) __a : Tuple = vocab_file __a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__a ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.sp_model.get_piece_size() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' __a : Dict = self.__dict__.copy() __a : Tuple = None return state def __setstate__( self , __a ): '''simple docstring''' __a : Tuple = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : Any = {} __a : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.sp_model.encode(__a , out_type=__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.sp_model.piece_to_id(__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : str = self.sp_model.IdToPiece(__a ) return token def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : List[str] = [] __a : Tuple = '' __a : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__a ) + token __a : Any = True __a : Optional[int] = [] else: current_sub_tokens.append(__a ) __a : Optional[int] = False out_string += self.sp_model.decode(__a ) return out_string.strip() def __UpperCAmelCase ( self , __a , __a = False , __a = None , __a = True , **__a , ): '''simple docstring''' __a : Tuple = kwargs.pop('use_source_tokenizer' , __a ) __a : Optional[int] = self.convert_ids_to_tokens(__a , skip_special_tokens=__a ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 __a : int = [] __a : Union[str, Any] = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__a ) ) __a : Union[str, Any] = [] sub_texts.append(__a ) else: current_sub_text.append(__a ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__a ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: __a : List[str] = re.sub(r' (\[(MASK|SEP)\])' , r'\1' , ' '.join(__a ) ) else: __a : str = ''.join(__a ) __a : str = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __a : Any = self.clean_up_tokenization(__a ) return clean_text else: return text def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' if not os.path.isdir(__a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __a : List[Any] = os.path.join( __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __a ) elif not os.path.isfile(self.vocab_file ): with open(__a , 'wb' ) as fi: __a : Tuple = self.sp_model.serialized_model_proto() fi.write(__a ) return (out_vocab_file,) def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a : Optional[int] = [self.cls_token_id] __a : Dict = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __UpperCAmelCase ( self , __a , __a = None , __a = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) if token_ids_a is None: return [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1] def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' __a : Any = [self.sep_token_id] __a : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
code_codestyle: 27
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): __a : Optional[Any] = tmp_path / 'file.csv' __a : Union[str, Any] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : str = tmp_path / 'malformed_file.csv' __a : int = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = tmp_path / 'csv_with_image.csv' __a : Dict = textwrap.dedent( F"""\ image {image_file} """ ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Union[str, Any] = tmp_path / 'csv_with_label.csv' __a : Any = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): __a : Dict = tmp_path / 'csv_with_int_list.csv' __a : Tuple = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): __a : int = Csv() __a : str = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(_SCREAMING_SNAKE_CASE , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(_SCREAMING_SNAKE_CASE ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Tuple = f.read().splitlines()[1] __a : Tuple = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) __a : Any = csv._generate_tables([[csv_file_with_image]] ) __a : int = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() __a : Any = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Tuple = f.read().splitlines()[1:] __a : Optional[int] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) __a : List[str] = csv._generate_tables([[csv_file_with_label]] ) __a : Dict = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() __a : int = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(_SCREAMING_SNAKE_CASE ) for label in labels] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): __a : str = 
Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda _SCREAMING_SNAKE_CASE : [int(_SCREAMING_SNAKE_CASE ) for i in x.split()]} ) __a : Any = csv._generate_tables([[csv_file_with_int_list]] ) __a : Any = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) __a : Tuple = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
style_context_codestyle: 27
label: 1
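The `style_context` of the row above passes a converter to the packaged CSV builder so a space-separated column parses as a list of ints. A standalone sketch of the same converter applied directly with pandas, on the assumption that the datasets Csv builder delegates parsing to pandas.read_csv:

# Sketch of the int-list converter pattern above: each cell of the
# "int_list" column is split on whitespace and mapped to ints.
import io

import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n7 8 9\n"
df = pd.read_csv(
    io.StringIO(csv_text),
    converters={"int_list": lambda cell: [int(tok) for tok in cell.split()]},
)
assert df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]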
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __lowercase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a ): '''simple docstring''' super().__init__() self.register_modules(unet=__a , scheduler=__a ) @torch.no_grad() def __call__( self , __a = 1 , __a = 100 , __a = None , __a = None , __a = True , ): '''simple docstring''' if audio_length_in_s is None: __a : Dict = self.unet.config.sample_size / self.unet.config.sample_rate __a : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate __a : str = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) __a : Tuple = int(__a ) if sample_size % down_scale_factor != 0: __a : Dict = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" ' process.' ) __a : Tuple = int(__a ) __a : List[Any] = next(iter(self.unet.parameters() ) ).dtype __a : Optional[int] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(__a , __a ) and len(__a ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(__a )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) __a : Tuple = randn_tensor(__a , generator=__a , device=self.device , dtype=__a ) # set step values self.scheduler.set_timesteps(__a , device=audio.device ) __a : str = self.scheduler.timesteps.to(__a ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __a : Tuple = self.unet(__a , __a ).sample # 2. compute previous image: x_t -> t_t-1 __a : str = self.scheduler.step(__a , __a , __a ).prev_sample __a : Union[str, Any] = audio.clamp(-1 , 1 ).float().cpu().numpy() __a : List[str] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=__a )
code_codestyle: 27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Union[str, Any] = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Union[str, Any] = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 27
label: 1
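The audio pipeline in the row above rounds the requested sample count up to a multiple of the UNet's total downsampling factor (2 to the power of the number of upsampling blocks) before denoising, then trims back afterwards. A sketch of the rounding arithmetic only:

# Sketch of the sample-size rounding above: the UNet halves the sequence
# once per block, so the input length must divide by 2**num_up_blocks.
def round_up_sample_size(audio_length_in_s: float, sample_rate: int,
                         num_up_blocks: int) -> int:
    down_scale_factor = 2 ** num_up_blocks
    sample_size = int(audio_length_in_s * sample_rate)
    if sample_size % down_scale_factor != 0:
        sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
    return sample_size

# e.g. 1.5 s at 16 kHz with 3 upsampling blocks -> a multiple of 8
assert round_up_sample_size(1.5, 16_000, 3) % 8 == 0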
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "EncodecFeatureExtractor" A_ = ("T5Tokenizer", "T5TokenizerFast") def __init__( self , __a , __a ): '''simple docstring''' super().__init__(__a , __a ) __a : Tuple = self.feature_extractor __a : Dict = False def __UpperCAmelCase ( self , __a=None , __a=None , __a=True ): '''simple docstring''' return self.tokenizer.get_decoder_prompt_ids(task=__a , language=__a , no_timestamps=__a ) def __call__( self , *__a , **__a ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__a , **__a ) __a : Optional[Any] = kwargs.pop('audio' , __a ) __a : Union[str, Any] = kwargs.pop('sampling_rate' , __a ) __a : Optional[Any] = kwargs.pop('text' , __a ) if len(__a ) > 0: __a : Any = args[0] __a : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if text is not None: __a : int = self.tokenizer(__a , **__a ) if audio is not None: __a : Optional[int] = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a ) if audio is None: return inputs elif text is None: return audio_inputs else: __a : Dict = audio_inputs['input_values'] if "padding_mask" in audio_inputs: __a : Optional[Any] = audio_inputs['padding_mask'] return inputs def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' __a : Tuple = kwargs.pop('audio' , __a ) __a : int = kwargs.pop('padding_mask' , __a ) if len(__a ) > 0: __a : Any = args[0] __a : List[str] = args[1:] if audio_values is not None: return self._decode_audio(__a , padding_mask=__a ) else: return self.tokenizer.batch_decode(*__a , **__a ) def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' return self.tokenizer.decode(*__a , **__a ) def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' __a : str = to_numpy(__a ) __a , __a , __a : Union[str, Any] = audio_values.shape if padding_mask is None: return list(__a ) __a : Optional[int] = to_numpy(__a ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __a : Union[str, Any] = seq_len - padding_mask.shape[-1] __a : List[str] = 1 - self.feature_extractor.padding_value __a : List[Any] = np.pad(__a , ((0, 0), (0, difference)) , 'constant' , constant_values=__a ) __a : Optional[int] = audio_values.tolist() for i in range(__a ): __a : Any = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __a : Optional[Any] = sliced_audio.reshape(__a , -1 ) return audio_values
code_codestyle: 27
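The Encodec processor in the `code` field above pads the padding mask with the non-padding value, so samples generated past the original input length are not trimmed away. A numpy sketch of that decode-side masking; the function name and defaults are mine:

# Sketch of the padding-mask trick above: extend the mask with the
# NON-padding value so newly generated samples survive the trim.
import numpy as np

def trim_generated_audio(audio_values: np.ndarray, padding_mask: np.ndarray,
                         padding_value: float = 0.0) -> list:
    # audio_values: (batch, channels, seq_len); padding_mask: (batch, orig_len)
    bsz, channels, seq_len = audio_values.shape
    difference = seq_len - padding_mask.shape[-1]  # assumed non-negative
    mask = np.pad(padding_mask, ((0, 0), (0, difference)),
                  "constant", constant_values=1 - padding_value)
    return [
        audio_values[i][:, mask[i] != padding_value].reshape(channels, -1)
        for i in range(bsz)
    ]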
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase : def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ): '''simple docstring''' __a : Optional[Any] = parent __a : int = batch_size __a : Any = num_channels __a : Optional[int] = image_size __a : Dict = patch_size __a : int = is_training __a : Union[str, Any] = use_input_mask __a : Optional[int] = use_token_type_ids __a : Dict = use_labels __a : str = vocab_size __a : List[Any] = hidden_size __a : Union[str, Any] = num_hidden_layers __a : str = num_attention_heads __a : Union[str, Any] = intermediate_size __a : Any = hidden_act __a : List[str] = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : List[Any] = max_position_embeddings __a : Tuple = type_vocab_size __a : Any = type_sequence_label_size __a : Optional[int] = initializer_range __a : Any = coordinate_size __a : List[Any] = shape_size __a : Optional[int] = num_labels __a : Dict = num_choices __a : Union[str, Any] = scope __a : Union[str, Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __a : Optional[int] = text_seq_length __a : Any = (image_size // patch_size) ** 2 + 1 __a : Dict = self.text_seq_length + self.image_seq_length def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __a : Any = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __a : List[Any] = bbox[i, j, 3] __a : Tuple = bbox[i, j, 1] __a : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __a : int = bbox[i, j, 2] __a : Dict = bbox[i, j, 0] __a : int = tmp_coordinate __a : Optional[int] = tf.constant(__a ) __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : str = None if self.use_input_mask: __a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) __a : str = None if self.use_token_type_ids: __a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __a : Optional[Any] = None __a 
: Optional[int] = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __a : int = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = TFLayoutLMvaModel(config=__a ) # text + image __a : List[Any] = model(__a , pixel_values=__a , training=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , ) __a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __a : Any = model(__a , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __a : str = model({'pixel_values': pixel_values} , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = TFLayoutLMvaForSequenceClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : str = self.num_labels __a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[Any] = 2 __a : Any = TFLayoutLMvaForQuestionAnswering(config=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.prepare_config_and_inputs() ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs __a : Any = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf 
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) A_ = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) A_ = False A_ = False A_ = False def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ): '''simple docstring''' return True def __UpperCAmelCase ( self , __a , __a , __a=False ): '''simple docstring''' __a : str = copy.deepcopy(__a ) if model_class in get_values(__a ): __a : str = { k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__a , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__a ): __a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Union[str, Any] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = TFLayoutLMvaModelTester(self ) __a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) if getattr(__a , 'hf_compute_loss' , __a ): # The number of elements in the loss should be the same as the number of elements in the label __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0] ] __a : Dict = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : Dict = prepared_for_class.pop('input_ids' ) __a : Tuple = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: __a : Union[str, Any] = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __a : List[Any] = -100 __a : List[str] = tf.convert_to_tensor(__a ) __a : Any = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = model(__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model 
correctly compute the loss with a tuple __a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) # Get keys that were added with the _prepare_for_class function __a : Dict = prepared_for_class.keys() - inputs_dict.keys() __a : Any = inspect.signature(model.call ).parameters __a : str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __a : List[Any] = {0: 'input_ids'} for label_key in label_keys: __a : List[Any] = signature_names.index(__a ) __a : Union[str, Any] = label_key __a : List[str] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __a : Union[str, Any] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __a : Optional[Any] = prepared_for_class[value] __a : str = tuple(__a ) # Send to model __a : Tuple = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a : Any = type self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __a , __a , __a , __a , __a , __a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) __a : Tuple = self.default_image_processor __a : List[Any] = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values __a : Union[str, Any] = tf.constant([[1, 2]] ) __a : Optional[Any] = 
tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a ) # verify the logits __a : List[Any] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , __a ) __a : Optional[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
27
1
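# A minimal standalone sketch (not part of the test suite above) of the bbox "legality"
# fix used in the LayoutLMv3 model tester: boxes are (x0, y0, x1, y1), and the tester
# swaps coordinates whenever x1 < x0 or y1 < y0. Plain NumPy is assumed here instead of TF;
# the helper name and shapes are invented for illustration.
import numpy as np


def make_legal_bboxes(bbox: np.ndarray) -> np.ndarray:
    """Return a copy of ``bbox`` with shape (batch, seq, 4) where x0 <= x1 and y0 <= y1."""
    fixed = bbox.copy()
    x0, y0, x1, y1 = bbox[..., 0], bbox[..., 1], bbox[..., 2], bbox[..., 3]
    fixed[..., 0], fixed[..., 2] = np.minimum(x0, x1), np.maximum(x0, x1)
    fixed[..., 1], fixed[..., 3] = np.minimum(y0, y1), np.maximum(y0, y1)
    return fixed


rng = np.random.default_rng(0)
boxes = rng.integers(0, 1000, size=(2, 7, 4))
legal = make_legal_bboxes(boxes)
assert (legal[..., 2] >= legal[..., 0]).all() and (legal[..., 3] >= legal[..., 1]).all()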
'''simple docstring''' import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class __UpperCamelCase : def __init__( self , __a , __a=14 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ): '''simple docstring''' __a : Union[str, Any] = parent __a : str = batch_size __a : Optional[int] = seq_length __a : Any = is_training __a : Tuple = use_token_type_ids __a : Optional[int] = use_input_mask __a : Optional[int] = use_labels __a : Dict = use_mc_token_ids __a : Union[str, Any] = vocab_size __a : Optional[Any] = hidden_size __a : Optional[int] = num_hidden_layers __a : List[str] = num_attention_heads __a : List[Any] = intermediate_size __a : int = hidden_act __a : List[str] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : Any = max_position_embeddings __a : Union[str, Any] = type_vocab_size __a : int = type_sequence_label_size __a : Union[str, Any] = initializer_range __a : Optional[Any] = num_labels __a : List[str] = num_choices __a : Any = scope __a : List[Any] = self.vocab_size - 1 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Union[str, Any] = None if self.use_input_mask: __a : Any = random_attention_mask([self.batch_size, self.seq_length] ) __a : Tuple = None if self.use_token_type_ids: __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[int] = None if self.use_mc_token_ids: __a : Optional[Any] = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __a : List[Any] = None __a : Dict = None __a : Tuple = None if self.use_labels: __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : Any = ids_tensor([self.batch_size] , self.num_choices ) __a : Optional[int] = self.get_config() __a : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __UpperCAmelCase ( self ): '''simple docstring''' return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a ): '''simple docstring''' __a : Optional[int] = CTRLModel(config=__a ) model.to(__a ) model.eval() model(__a , token_type_ids=__a , head_mask=__a ) model(__a , token_type_ids=__a ) __a : Any = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __UpperCAmelCase ( self , __a , __a , __a , 
__a , __a , *__a ): '''simple docstring''' __a : Dict = CTRLLMHeadModel(__a ) model.to(__a ) model.eval() __a : List[str] = model(__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Any = config_and_inputs __a : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask} return config, inputs_dict def __UpperCAmelCase ( self , __a , __a , __a , __a , *__a ): '''simple docstring''' __a : Any = self.num_labels __a : Union[str, Any] = CTRLForSequenceClassification(__a ) model.to(__a ) model.eval() __a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : str = model(__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ = (CTRLLMHeadModel,) if is_torch_available() else () A_ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. return True return False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = CTRLModelTester(self ) __a : str = ConfigTester(self , config_class=__a , n_embd=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __UpperCAmelCase ( self ): '''simple docstring''' pass @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Dict = CTRLModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def __UpperCAmelCase ( self ): '''simple docstring''' pass @require_torch class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = CTRLLMHeadModel.from_pretrained('ctrl' ) model.to(__a ) __a : Union[str, Any] = torch.tensor( [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=__a ) # Legal the president is __a : List[Any] = [ 1_1859, 0, 1611, 8, 5, 150, 2_6449, 2, 19, 348, 469, 3, 2595, 48, 2_0740, 24_6533, 24_6533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __a : List[str] = model.generate(__a , do_sample=__a ) self.assertListEqual(output_ids[0].tolist() , __a )
27
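# A hedged usage sketch mirroring the slow CTRL integration test above: greedy decoding
# from the 'ctrl' checkpoint with a control code prefix. This assumes the checkpoint can
# be downloaded; it is an illustration, not part of the test file.
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained('ctrl')
model = CTRLLMHeadModel.from_pretrained('ctrl')
input_ids = tokenizer('Legal the president is', return_tensors='pt').input_ids
output_ids = model.generate(input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))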
'''simple docstring''' import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append('.')


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
    __a : Any = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            F"""{test_file} instead.""" )
    __a : Tuple = components[-1]
    if not test_fn.endswith('py' ):
        raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith('test_modeling_' ):
        raise ValueError(
            F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
    __a : List[str] = components[:-1] + [test_fn.replace('.py' , '' )]
    __a : Optional[Any] = '.'.join(_SCREAMING_SNAKE_CASE )
    return test_module_path


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : List[str] = get_module_path(_SCREAMING_SNAKE_CASE )
    __a : Dict = importlib.import_module(_SCREAMING_SNAKE_CASE )
    return test_module


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : List[str] = []
    __a : List[str] = get_test_module(_SCREAMING_SNAKE_CASE )
    for attr in dir(_SCREAMING_SNAKE_CASE ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : Any = []
    __a : str = get_test_module(_SCREAMING_SNAKE_CASE )
    for attr in dir(_SCREAMING_SNAKE_CASE ):
        __a : int = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'all_model_classes' , [] )
        if len(_SCREAMING_SNAKE_CASE ) > 0:
            test_classes.append(_SCREAMING_SNAKE_CASE )
    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : Any = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
    __a : Tuple = test_class()
    if hasattr(_SCREAMING_SNAKE_CASE , 'setUp' ):
        test.setUp()
    __a : List[Any] = None
    if hasattr(_SCREAMING_SNAKE_CASE , 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            __a : List[str] = test.model_tester.__class__
    return model_tester


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : int = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(_SCREAMING_SNAKE_CASE )
    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] ):
    __a : List[Any] = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    __a : Any = []
    for test_class in test_classes:
        __a : Any = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE )
        if tester_class is not None:
            tester_classes.append(_SCREAMING_SNAKE_CASE )
    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : int = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes}
    return test_tester_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
    __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE )
    __a : Optional[int] = {
        model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
    }
    return model_test_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
    __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE )
    __a : str = {
        model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
    }
    return model_to_tester_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return o
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return o.__name__
    elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
        return [to_json(_SCREAMING_SNAKE_CASE ) for x in o]
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()}
    else:
        return o
27
1
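# A minimal sketch of the path-to-module conversion implemented above: a test file path
# like tests/models/bert/test_modeling_bert.py becomes a dotted import path. This is a
# standalone re-statement for illustration (validation omitted), not the module's public API.
import os


def test_file_to_module_path(test_file: str) -> str:
    components = test_file.split(os.path.sep)
    components[-1] = components[-1].replace('.py', '')
    return '.'.join(components)


path = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
assert test_file_to_module_path(path) == 'tests.models.bert.test_modeling_bert'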
'''simple docstring''' from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
    __a : int = int(number**0.5 )
    return number == sq * sq


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
    __a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    __a : int = x_den * y_den * z_den
    __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    top //= hcf
    bottom //= hcf
    return top, bottom


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 35 ):
    __a : set = set()
    __a : int
    __a : Fraction = Fraction(0 )
    __a : tuple[int, int]

    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    __a : Union[str, Any] = x_num * y_den + x_den * y_num
                    __a : Optional[Any] = x_den * y_den
                    __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        __a : Any = add_three(
                            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=2
                    __a : Optional[int] = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    __a : Union[str, Any] = x_den * x_den * y_den * y_den
                    if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
                        __a : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            __a : List[Any] = add_three(
                                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                            unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=-1
                    __a : int = x_num * y_num
                    __a : Optional[Any] = x_den * y_num + x_num * y_den
                    __a : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        __a : Any = add_three(
                            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=-2
                    __a : List[Any] = x_num * x_num * y_num * y_num
                    __a : List[Any] = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
                        __a : Optional[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Union[str, Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            __a : List[str] = add_three(
                                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                            unique_s.add(_SCREAMING_SNAKE_CASE )

    for num, den in unique_s:
        total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f'''{solution() = }''')
27
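# A small sanity check of the reduction logic used above: adding three fractions over a
# common denominator and reducing by the gcd should agree with fractions.Fraction. This
# standalone re-statement of add_three uses readable names for illustration only.
from fractions import Fraction
from math import gcd


def add_three(x_num, x_den, y_num, y_den, z_num, z_den):
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    return top // hcf, bottom // hcf


num, den = add_three(1, 2, 1, 3, 1, 6)
assert Fraction(num, den) == Fraction(1, 2) + Fraction(1, 3) + Fraction(1, 6) == 1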
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = StableDiffusionInpaintPipeline A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A_ = frozenset([] ) def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) __a : str = PNDMScheduler(skip_prk_steps=__a ) torch.manual_seed(0 ) __a : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) __a : Dict = CLIPTextModel(__a ) __a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __a : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __UpperCAmelCase ( self , __a , __a=0 ): '''simple docstring''' __a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) __a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __a : Tuple = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) ) __a : Tuple = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) ) if str(__a ).startswith('mps' ): __a : Any = torch.manual_seed(__a ) else: __a : str = torch.Generator(device=__a ).manual_seed(__a ) __a : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : str = self.get_dummy_components() __a : Union[str, Any] = StableDiffusionInpaintPipeline(**__a ) __a : List[Any] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) __a : List[Any] = self.get_dummy_inputs(__a ) __a : Dict = 
sd_pipe(**__a ).images __a : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __a : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy' ) __a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting' __a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() __a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : Tuple = torch.manual_seed(0 ) __a : int = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , ) __a : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy' ) __a : str = 'stabilityai/stable-diffusion-2-inpainting' __a : List[str] = StableDiffusionInpaintPipeline.from_pretrained( __a , torch_dtype=torch.floataa , safety_checker=__a , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() __a : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : int = torch.manual_seed(0 ) __a : Optional[Any] = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , ) __a : int = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __UpperCAmelCase ( self ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : str = 'stabilityai/stable-diffusion-2-inpainting' __a : Any = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' ) __a : str = StableDiffusionInpaintPipeline.from_pretrained( __a , safety_checker=__a , scheduler=__a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) 
pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __a : str = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : Tuple = torch.manual_seed(0 ) __a : str = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='np' , ) __a : List[str] = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
27
1
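# A hedged end-to-end sketch of the inpainting call exercised by the slow tests above,
# reusing the same checkpoint and image fixtures. Assumes a CUDA GPU and that the
# 'stabilityai/stable-diffusion-2-inpainting' checkpoint can be downloaded.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
    '/sd2-inpaint/init_image.png'
)
mask_image = load_image(
    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    'stabilityai/stable-diffusion-2-inpainting', torch_dtype=torch.float16
)
pipe.to('cuda')
image = pipe(
    prompt='Face of a yellow cat, high resolution, sitting on a park bench',
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save('inpainted.png')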
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowercase : List[Any] = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase_ ): A_ = ["pixel_values"] def __init__( self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = 1 / 255 , __a = True , __a = None , __a = None , __a = True , **__a , ): '''simple docstring''' super().__init__(**__a ) __a : int = size if size is not None else {'height': 384, 'width': 384} __a : Any = get_size_dict(__a , default_to_square=__a ) __a : Union[str, Any] = do_resize __a : Tuple = size __a : Optional[int] = resample __a : str = do_rescale __a : List[Any] = rescale_factor __a : Tuple = do_normalize __a : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __a : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD __a : str = do_convert_rgb def __UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ): '''simple docstring''' __a : str = get_size_dict(__a , default_to_square=__a ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" ) __a : Optional[Any] = (size['height'], size['width']) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __UpperCAmelCase ( self , __a , __a , __a = None , **__a , ): '''simple docstring''' return rescale(__a , scale=__a , data_format=__a , **__a ) def __UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ): '''simple docstring''' return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ): '''simple docstring''' __a : str = do_resize if do_resize is not None else self.do_resize __a : Tuple = resample if resample is not None else self.resample __a : Dict = do_rescale if do_rescale is not None else self.do_rescale __a : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor __a : str = do_normalize if do_normalize is not None else self.do_normalize __a : Optional[int] = image_mean if image_mean is not None else self.image_mean __a : Any = image_std if image_std is not None else self.image_std __a : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __a : Any = size if size is not None else self.size __a : Union[str, Any] = get_size_dict(__a , default_to_square=__a ) __a : Union[str, Any] = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __a : List[Any] = [convert_to_rgb(__a ) for image in images] # All transformations expect numpy arrays. __a : int = [to_numpy_array(__a ) for image in images] if do_resize: __a : Any = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_rescale: __a : Union[str, Any] = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: __a : List[str] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] __a : Tuple = [to_channel_dimension_format(__a , __a ) for image in images] __a : int = BatchFeature(data={'pixel_values': images} , tensor_type=__a ) return encoded_outputs
27
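# A NumPy-only sketch of the resize -> rescale -> normalize pipeline the image processor
# above applies. The mean/std values are the OpenAI CLIP normalization constants the class
# references; they are reproduced here as an assumption of this sketch, and PIL does the resize.
import numpy as np
from PIL import Image

OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]

image = Image.new('RGB', (500, 300))  # dummy input image
resized = np.asarray(image.resize((384, 384), Image.BICUBIC), dtype=np.float32)
rescaled = resized * (1 / 255)  # rescale to [0, 1]
normalized = (rescaled - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD  # normalize per channel
pixel_values = normalized.transpose(2, 0, 1)[None]  # to (batch, channels, height, width)
assert pixel_values.shape == (1, 3, 384, 384)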
'''simple docstring''' import requests

__lowercase : Tuple = ''  # <-- Put your OpenWeatherMap appid here!
__lowercase : Tuple = 'https://api.openweathermap.org/data/2.5/'


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Chicago" , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'weather' , params=locals() ).json()


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Kolkata, India" , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'forecast' , params=locals() ).json()


def lowerCamelCase (_SCREAMING_SNAKE_CASE : float = 5_5.6_8 , _SCREAMING_SNAKE_CASE : float = 1_2.5_7 , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'onecall' , params=locals() ).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        __lowercase : Dict = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
27
1
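# A hedged alternative to the params=locals() trick above: passing the query parameters
# explicitly. The 'q' and 'appid' parameter names follow the OpenWeatherMap API; a real
# appid is assumed to be supplied by the caller.
import requests


def current_weather_explicit(location: str, appid: str) -> dict:
    url = 'https://api.openweathermap.org/data/2.5/weather'
    return requests.get(url, params={'q': location, 'appid': appid}).json()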
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[int] ):
    if not numbers:
        return 0

    if not isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) or not all(
        isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers' )

    __a : Optional[Any] = numbers[0]

    for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
        # update the maximum and minimum subarray products
        __a : Optional[int] = numbers[i]
        if number < 0:
            __a , __a : List[Any] = min_till_now, max_till_now
        __a : str = max(_SCREAMING_SNAKE_CASE , max_till_now * number )
        __a : Tuple = min(_SCREAMING_SNAKE_CASE , min_till_now * number )

        # update the maximum product found till now
        __a : List[str] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    return max_prod
27
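# A quick sanity check of the maximum-product-subarray routine above. With a negative
# number, the min/max swap is what lets the product recover sign; max_subarray_product
# stands in for the (renamed) function defined above, restated here for illustration.
def max_subarray_product(numbers):
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            # a negative factor turns the smallest product into the largest
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod


assert max_subarray_product([2, 3, -2, 4, -1]) == 48
assert max_subarray_product([-2, 0, -1]) == 0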
'''simple docstring''' import torch

from transformers import AutoModel


class __UpperCamelCase ( torch.nn.Module ):
    def __init__( self , __a="sayef/fsner-bert-base-uncased" ):
        '''simple docstring'''
        super(__a , self ).__init__()

        __a : Tuple = AutoModel.from_pretrained(__a , return_dict=__a )
        __a : int = torch.nn.CosineSimilarity(3 , 1E-0_8 )
        __a : Union[str, Any] = torch.nn.Softmax(dim=1 )

    def __UpperCAmelCase ( self , **__a ):
        '''simple docstring'''
        return self.bert(**__a ).last_hidden_state

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        return token_embeddings.sum(2 , keepdim=__a )

    def __UpperCAmelCase ( self , __a , __a , __a=1 ):
        '''simple docstring'''
        return self.softmax(T * self.cos(__a , __a ) )

    def __UpperCAmelCase ( self , __a , __a ):
        '''simple docstring'''
        __a : str = W_supports['sizes'].tolist()
        __a : Union[str, Any] = W_supports['start_token_id'].item()
        __a : Any = W_supports['end_token_id'].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        __a : Tuple = self.BERT(**__a )
        __a : str = self.BERT(**__a )

        __a : Any = None
        __a : Dict = None

        __a : Dict = W_supports['input_ids'] == start_token_id
        __a : Union[str, Any] = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(__a ):
            if i == 0:
                __a : Optional[int] = 0
            else:
                __a : Union[str, Any] = support_sizes[i - 1]

            __a : int = S[s : s + size][start_token_masks[s : s + size]]
            __a : Union[str, Any] = S[s : s + size][end_token_masks[s : s + size]]

            __a : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            __a : Dict = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )

            if p_starts is not None:
                __a : str = torch.vstack((p_starts, p_start) )
                __a : str = torch.vstack((p_ends, p_end) )
            else:
                __a : List[str] = p_start
                __a : int = p_end

        return p_starts, p_ends
27
1
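# A toy illustration (shapes invented for the example) of the temperature-scaled scoring
# used by the FSNER module above: softmax(T * cosine_similarity(...)) with the same
# CosineSimilarity(dim=3) / Softmax(dim=1) configuration.
import torch

cos = torch.nn.CosineSimilarity(3, 1e-8)
softmax = torch.nn.Softmax(dim=1)

query = torch.randn(2, 5, 7, 8)    # (batch, query_tokens, support_tokens, hidden)
support = torch.randn(2, 5, 7, 8)  # aligned support-token embeddings (illustrative)
T = 1.0  # temperature
scores = softmax(T * cos(query, support))
print(scores.shape)  # torch.Size([2, 5, 7])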
'''simple docstring''' import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = ["image_processor", "tokenizer"]
    A_ = "FlavaImageProcessor"
    A_ = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , __a=None , __a=None , **__a ):
        '''simple docstring'''
        __a : str = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , __a , )
            __a : int = kwargs.pop('feature_extractor' )

        __a : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(__a , __a )
        __a : Optional[int] = self.image_processor

    def __call__( self , __a = None , __a = None , __a = True , __a = False , __a = False , __a = None , __a = 0 , __a = None , __a = None , __a = None , __a = None , __a = None , __a = False , __a = False , __a = False , __a = False , __a = True , __a = None , **__a , ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )

        if text is not None:
            __a : Union[str, Any] = self.tokenizer(
                text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
        if images is not None:
            __a : Optional[Any] = self.image_processor(
                __a , return_image_mask=__a , return_codebook_pixels=__a , return_tensors=__a , **__a , )

        if text is not None and images is not None:
            encoding.update(__a )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__a ) , tensor_type=__a )

    def __UpperCAmelCase ( self , *__a , **__a ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*__a , **__a )

    def __UpperCAmelCase ( self , *__a , **__a ):
        '''simple docstring'''
        return self.tokenizer.decode(*__a , **__a )

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Optional[Any] = self.tokenizer.model_input_names
        __a : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __a , )
        return self.image_processor_class

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __a , )
        return self.image_processor
27
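# A hedged usage sketch for the processor above: joint text + image preprocessing.
# Assumes the 'facebook/flava-full' checkpoint is available for download; the dummy
# image is illustrative.
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained('facebook/flava-full')
inputs = processor(
    text=['a photo of a cat'],
    images=[Image.new('RGB', (224, 224))],
    return_tensors='pt',
    padding=True,
)
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values', ...]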
'''simple docstring''' import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __lowercase : Dict = { 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_28, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 50, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 10, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 10, 'exponential_decay_length_penalty': (5, 1.01), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class __UpperCamelCase ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' __a : int = TOKEN HfFolder.save_token(__a ) @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='test-config' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-config-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-config' ) except HTTPError: pass def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('test-config' , use_auth_token=self._token ) __a : Dict = BertConfig.from_pretrained(f"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__a , getattr(__a , __a ) ) # Reset repo delete_repo(token=self._token , repo_id='test-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a , repo_id='test-config' , push_to_hub=__a , use_auth_token=self._token ) __a : List[str] = BertConfig.from_pretrained(f"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__a , getattr(__a , __a ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) 
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token ) __a : List[Any] = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__a , getattr(__a , __a ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __a , repo_id='valid_org/test-config-org' , push_to_hub=__a , use_auth_token=self._token ) __a : List[Any] = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__a , getattr(__a , __a ) ) def __UpperCAmelCase ( self ): '''simple docstring''' CustomConfig.register_for_auto_class() __a : str = CustomConfig(attribute=42 ) config.push_to_hub('test-dynamic-config' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} ) __a : Tuple = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=__a ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' ) self.assertEqual(new_config.attribute , 42 ) class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __a : Dict = c.n_embd + 1 # int __a : List[str] = c.resid_pdrop + 1.0 # float __a : Optional[int] = not c.scale_attn_weights # bool __a : Union[str, Any] = c.summary_type + 'foo' # str c.update_from_string( f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" ) self.assertEqual(__a , c.n_embd , 'mismatch for key: n_embd' ) self.assertEqual(__a , c.resid_pdrop , 'mismatch for key: resid_pdrop' ) self.assertEqual(__a , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' ) self.assertEqual(__a , c.summary_type , 'mismatch for key: summary_type' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = PretrainedConfig() __a : Dict = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( __a , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] ) __a : int = [key for key, value in config_common_kwargs.items() if value == getattr(__a , __a )] if len(__a ) > 0: raise ValueError( 'The following keys are set with the default values in' ' `test_configuration_common.config_common_kwargs` pick another value for them:' f""" {", ".join(__a )}.""" ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ): # config is in subfolder, the following should not work without specifying the subfolder __a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ) __a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' ) self.assertIsNotNone(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = mock.Mock() __a : Dict = 500 __a : Any = {} __a : int = HTTPError __a : Tuple = {} # Download this model to make sure it's in the cache. 
__a : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=__a ) as mock_head: __a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = BertConfig.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = AutoConfig.from_pretrained('bert-base-cased' ) __a : Optional[Any] = ['config.4.0.0.json'] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(__a ) __a : List[str] = 2 json.dump(configuration.to_dict() , open(os.path.join(__a , 'config.4.0.0.json' ) , 'w' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __a : Optional[int] = AutoConfig.from_pretrained(__a ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 __a : str = ['config.42.0.0.json'] __a : List[Any] = 768 configuration.save_pretrained(__a ) shutil.move(os.path.join(__a , 'config.4.0.0.json' ) , os.path.join(__a , 'config.42.0.0.json' ) ) __a : List[str] = AutoConfig.from_pretrained(__a ) self.assertEqual(new_configuration.hidden_size , 768 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = 'hf-internal-testing/test-two-configs' import transformers as new_transformers __a : Optional[Any] = 'v4.0.0' __a , __a : Any = new_transformers.models.auto.AutoConfig.from_pretrained( __a , return_unused_kwargs=__a ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(__a , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers __a : Dict = 'v3.0.0' __a : str = old_transformers.models.auto.AutoConfig.from_pretrained(__a ) self.assertEqual(old_configuration.hidden_size , 768 )
27
'''simple docstring'''
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Dict = ort.SessionOptions()
        __a : Dict = False
        return options

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        __a : int = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        __a : Dict = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )

        # using the PNDM scheduler by default
        __a : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__a )

        __a : Tuple = 'A red cat sitting on a park bench'

        __a : int = np.random.RandomState(0 )
        __a : Tuple = pipe(
            prompt=__a , image=__a , mask_image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__a , output_type='np' , )

        __a : Tuple = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-2
27
1
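A hedged aside: the `update_from_string` call exercised in the config test above parses a comma-separated `key=value` string and casts each value to the matching attribute's current type. A minimal standalone sketch of that parsing logic (the function and class names here are illustrative, not the library's actual implementation):

def update_from_string_sketch(obj, spec: str):
    # split "k1=v1,k2=v2" into pairs and cast each value to the existing attribute's type
    for pair in spec.split(','):
        key, value = pair.split('=')
        old = getattr(obj, key)
        if isinstance(old, bool):
            value = value.lower() in ('true', '1')  # bool('False') is True, so handle bools separately
        else:
            value = type(old)(value)
        setattr(obj, key, value)

class _ToyConfig:
    n_embd = 768
    scale_attn_weights = True

cfg = _ToyConfig()
update_from_string_sketch(cfg, 'n_embd=1024,scale_attn_weights=False')
print(cfg.n_embd, cfg.scale_attn_weights)  # 1024 False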
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    __lowercase : Tuple = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    __lowercase : Any = f'''https://www.google.com/search?q={query}&num=100'''
    __lowercase : Optional[int] = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        __lowercase : int = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        __lowercase : Optional[Any] = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
    webbrowser.open(link)
27
'''simple docstring''' import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowercase : Dict = 16 __lowercase : List[Any] = 32 def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return int(x / 2**20 ) class __UpperCamelCase : def __enter__( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero __a : Optional[int] = torch.cuda.memory_allocated() return self def __exit__( self , *__a ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() __a : Dict = torch.cuda.memory_allocated() __a : List[Any] = torch.cuda.max_memory_allocated() __a : Tuple = bamb(self.end - self.begin ) __a : Tuple = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def lowerCamelCase (_SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : str = "bert-base-cased" , _SCREAMING_SNAKE_CASE : int = 320 , _SCREAMING_SNAKE_CASE : int = 160 , ): __a : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : List[Any] = load_dataset( 'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} ) def tokenize_function(_SCREAMING_SNAKE_CASE : Tuple ): # max_length=None => use the model max length (it's actually the default) __a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __a : List[str] = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __a : Tuple = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_SCREAMING_SNAKE_CASE : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
__a : int = DataLoader( tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) __a : Tuple = DataLoader( tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Initialize accelerator __a : str = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __a : Dict = config['lr'] __a : str = int(config['num_epochs'] ) __a : Optional[int] = int(config['seed'] ) __a : Any = int(config['batch_size'] ) __a : List[str] = args.model_name_or_path set_seed(_SCREAMING_SNAKE_CASE ) __a , __a : int = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) # Instantiate optimizer __a : Optional[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __a : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) if accelerator.state.deepspeed_plugin is not None: __a : int = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __a : Union[str, Any] = 1 __a : Tuple = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __a : str = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , ) else: __a : List[Any] = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__a , __a , __a , __a , __a : Optional[Any] = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over __a : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly __a : Dict = 0 # Now we train the model __a : str = {} for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): __a : List[Any] = model(**_SCREAMING_SNAKE_CASE ) __a : str = outputs.loss __a : str = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) __a : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_SCREAMING_SNAKE_CASE , ) parser.add_argument( '--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_SCREAMING_SNAKE_CASE , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_SCREAMING_SNAKE_CASE , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of train epochs.' , ) __a : List[str] = parser.parse_args() __a : List[Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
27
1
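The training script above measures peak GPU memory with a small context manager built from gc.collect, torch.cuda.empty_cache, and the CUDA memory counters. A minimal self-contained sketch of that pattern (the class name and the toy workload are illustrative assumptions, and a CUDA device is assumed to be available):

import gc

import torch


class PeakMemoryTracker:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        # peak delta in MB, mirroring the bytes-to-MB helper used above
        self.peaked = (torch.cuda.max_memory_allocated() - self.begin) // 2**20


with PeakMemoryTracker() as tracker:
    torch.randn(1024, 1024, device='cuda') @ torch.randn(1024, 1024, device='cuda')
print(f'peak delta: {tracker.peaked} MB')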
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = tempfile.mkdtemp() # fmt: off __a : Tuple = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on __a : List[Any] = dict(zip(__a , range(len(__a ) ) ) ) __a : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] __a : Dict = {'unk_token': '<unk>'} __a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__a ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__a ) ) __a : List[str] = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48145466, 0.4578275, 0.40821073], 'image_std': [0.26862954, 0.26130258, 0.27577711], } __a : Optional[Any] = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __a : str = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.get_tokenizer() __a : Optional[Any] = self.get_rust_tokenizer() __a : Dict = self.get_image_processor() __a : Any = CLIPSegProcessor(tokenizer=__a , image_processor=__a ) processor_slow.save_pretrained(self.tmpdirname ) __a : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__a ) __a : Union[str, Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a ) processor_fast.save_pretrained(self.tmpdirname ) __a : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __a ) self.assertIsInstance(processor_fast.tokenizer , __a ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __a ) self.assertIsInstance(processor_fast.image_processor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __a : Any = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : int = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) __a : Union[str, Any] = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.get_image_processor() __a : Optional[int] = self.get_tokenizer() __a : str = CLIPSegProcessor(tokenizer=__a , image_processor=__a ) __a : Dict = self.prepare_image_inputs() __a : Tuple = image_processor(__a , return_tensors='np' ) __a : List[Any] = processor(images=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_image_processor() __a : List[str] = self.get_tokenizer() __a : Optional[Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a ) __a : Optional[Any] = 'lower newer' __a : Union[str, Any] = processor(text=__a ) __a : str = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.get_image_processor() __a : Dict = self.get_tokenizer() __a : List[Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a ) __a : List[str] = 'lower newer' __a : Optional[int] = self.prepare_image_inputs() __a : Tuple = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.get_image_processor() __a : Dict = self.get_tokenizer() __a : Tuple = CLIPSegProcessor(tokenizer=__a , image_processor=__a ) __a : Dict = self.prepare_image_inputs() __a : List[str] = self.prepare_image_inputs() __a : int = processor(images=__a , visual_prompt=__a ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.get_image_processor() __a : Union[str, Any] = self.get_tokenizer() __a : Union[str, Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a ) __a : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Any = processor.batch_decode(__a ) __a : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a )
27
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __lowercase : List[Any] = 'bart' __lowercase : Union[str, Any] = True @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): if LOAD_DENSE_INDEX: __a : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) __a : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) __a : Optional[int] = qar_model.eval() else: __a , __a : str = (None, None) if MODEL_TYPE == "bart": __a : Union[str, Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) __a : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) __a : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) __a : str = sas_model.eval() else: __a , __a : Tuple = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): if LOAD_DENSE_INDEX: __a : Optional[Any] = faiss.StandardGpuResources() __a : Dict = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train'] __a : int = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) __a : int = faiss.IndexFlatIP(128 ) __a : Any = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE ) wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU else: __a , __a : str = (None, None) __a : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : Dict = datasets.load_dataset('eli5' , name='LFQA_reddit' ) __a : Dict = elia['train_eli5'] __a : Optional[int] = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) ) __a : str = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_SCREAMING_SNAKE_CASE ) return (elia_train, eli5_train_q_index) __lowercase , __lowercase , __lowercase : Any = load_indexes() __lowercase , __lowercase , __lowercase , __lowercase : Dict = load_models() __lowercase , __lowercase : int = load_train_data() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str]=10 ): __a : Optional[int] = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a , __a : Union[str, Any] = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Any = [elia_train[int(_SCREAMING_SNAKE_CASE )] for i in I[0]] return nn_examples def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str="wiki40b" , _SCREAMING_SNAKE_CASE : List[str]="dense" , _SCREAMING_SNAKE_CASE : Any=10 ): if source == "none": __a , __a : Any = (' <P> '.join(['' for _ in range(11 )] ).strip(), []) else: if method == "dense": __a , __a : str = query_qa_dense_index( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a , __a : Union[str, Any] = query_es_index( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , ) __a : Dict = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] __a : Any = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None), } ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.9_5 , _SCREAMING_SNAKE_CASE : str=0.8 ): with torch.no_grad(): __a : Union[str, Any] = qa_sas_generate( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar __lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' __lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) __lowercase : Dict = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] __lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options') if demo_options: __lowercase : Any = st.sidebar.selectbox( '', action_list, index=3, ) __lowercase : Tuple = action_list.index(action_st) __lowercase : Tuple = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) __lowercase : List[Any] = show_type == 'Show full text of passages' else: __lowercase : int = 3 __lowercase : str = True __lowercase : Tuple = st.sidebar.checkbox('Retrieval options') if retrieval_options: __lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model 
which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) __lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) __lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: __lowercase : str = 'wiki40b' __lowercase : List[Any] = 'dense' __lowercase : Dict = 'beam' __lowercase : Optional[int] = 2 __lowercase : List[str] = 64 __lowercase : Tuple = 2_56 __lowercase : List[str] = None __lowercase : Tuple = None __lowercase : List[Any] = st.sidebar.checkbox('Generation options') if generate_options: __lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) __lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) __lowercase : Tuple = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) __lowercase : int = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": __lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __lowercase : Dict = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __lowercase : Union[str, Any] = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __lowercase : List[str] = None # start main text __lowercase : int = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] __lowercase : Optional[int] = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": __lowercase : Any = st.text_input('Enter your question here:', '') else: __lowercase : Any = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": __lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10) __lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10) __lowercase : Optional[int] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __lowercase : str = support_list[:10] __lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: __lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __lowercase , __lowercase : int = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): __lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) __lowercase : Any = res[1].strip() if sec_titles == "": __lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url) else: __lowercase : Union[str, Any] = sec_titles.split(' & ') __lowercase : str = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: __lowercase : str = find_nearest_training(question) __lowercase : Optional[int] = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) __lowercase : Any = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) __lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
27
1
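The retrieval demo above builds FAISS inner-product indexes over dense passage embeddings. A minimal sketch of that max-inner-product search step, assuming faiss and numpy are installed (the dimensions and data are toy values, not the demo's real embeddings):

import faiss
import numpy as np

dim = 128
index = faiss.IndexFlatIP(dim)  # exact inner-product index, as in the demo
passages = np.random.rand(1000, dim).astype('float32')
index.add(passages)

query = np.random.rand(1, dim).astype('float32')
scores, ids = index.search(query, 10)  # top-10 passages by inner product
print(ids[0])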
'''simple docstring'''
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dataset , _SCREAMING_SNAKE_CASE : Dict[str, str] ):
    __a : List[str] = args.log_outputs
    __a : Optional[Any] = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )

    # load metric
    __a : Union[str, Any] = load_metric('wer' )
    __a : str = load_metric('cer' )

    # compute metrics
    __a : Optional[int] = wer.compute(references=result['target'] , predictions=result['prediction'] )
    __a : Any = cer.compute(references=result['target'] , predictions=result['prediction'] )

    # print & log results
    __a : Dict = F"""WER: {wer_result}\nCER: {cer_result}"""
    print(_SCREAMING_SNAKE_CASE )

    with open(F"""{dataset_id}_eval_results.txt""" , 'w' ) as f:
        f.write(_SCREAMING_SNAKE_CASE )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        __a : List[str] = F"""log_{dataset_id}_predictions.txt"""
        __a : Union[str, Any] = F"""log_{dataset_id}_targets.txt"""

        with open(_SCREAMING_SNAKE_CASE , 'w' ) as p, open(_SCREAMING_SNAKE_CASE , 'w' ) as t:
            # mapping function to write output
            def write_to_file(_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ):
                p.write(F"""{i}""" + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(F"""{i}""" + '\n' )
                t.write(batch['target'] + '\n' )

            result.map(_SCREAMING_SNAKE_CASE , with_indices=_SCREAMING_SNAKE_CASE )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    __a : List[Any] = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    __a : Optional[int] = re.sub(_SCREAMING_SNAKE_CASE , '' , text.lower() )

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    __a : int = ['\n\n', '\n', ' ', ' ']
    for t in token_sequences_to_ignore:
        __a : Any = ' '.join(text.split(_SCREAMING_SNAKE_CASE ) )

    return text


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
    # load dataset
    __a : Dict = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_SCREAMING_SNAKE_CASE )

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    __a : int = AutoFeatureExtractor.from_pretrained(args.model_id )
    __a : List[str] = feature_extractor.sampling_rate

    # resample audio
    __a : List[str] = dataset.cast_column('audio' , Audio(sampling_rate=_SCREAMING_SNAKE_CASE ) )

    # load eval pipeline
    if args.device is None:
        __a : List[Any] = 0 if torch.cuda.is_available() else -1
    __a : int = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(_SCREAMING_SNAKE_CASE : int ):
        __a : List[Any] = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )

        __a : List[Any] = prediction['text']
        __a : List[Any] = normalize_text(batch['sentence'] )
        return batch

    # run inference on all examples
    __a : List[Any] = dataset.map(_SCREAMING_SNAKE_CASE , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    __lowercase : Optional[Any] = argparse.ArgumentParser()

    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    __lowercase : Union[str, Any] = parser.parse_args()

    main(args)
27
'''simple docstring'''
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
__lowercase : Tuple = logging.get_logger(__name__)

__lowercase : List[Any] = torch.device('cpu')


def lowerCamelCase ():
    __a : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    __a : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
    return im


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    __a : int = dct.pop(_SCREAMING_SNAKE_CASE )
    __a : Tuple = val


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    __a : Dict = []
    for k in state_dict.keys():
        __a : List[Any] = k
        if ".pwconv" in k:
            __a : List[Any] = k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            __a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            __a : Optional[int] = k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            __a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            __a : Union[str, Any] = k_new.split('.' )
            if ls[2].isdigit():
                __a : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                __a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys


@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    __a : Union[str, Any] = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    __a : List[str] = 1_000
    __a : Tuple = 'huggingface/label-files'
    __a : str = 'imagenet-1k-id2label.json'
    __a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
    __a : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
    __a : Any = idalabel
    __a : str = {v: k for k, v in idalabel.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        __a : Dict = [3, 3, 6, 4]
        __a : int = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        __a : Dict = [3, 3, 9, 6]
        __a : List[str] = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        __a : Dict = [4, 3, 10, 5]
        __a : Optional[int] = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        __a : Tuple = [4, 4, 12, 6]
        __a : Dict = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            __a : List[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE )
        else:
            __a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
    __a : Optional[Any] = checkpoint

    __a : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    # load HuggingFace model
    __a : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
    hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )

    # prepare test inputs
    __a : Tuple = prepare_img()
    __a : str = ViTImageProcessor.from_pretrained('preprocessor_config' )
    __a : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' )

    # compare outputs from both models
    __a : List[Any] = get_expected_output(_SCREAMING_SNAKE_CASE )
    __a : Dict = hf_model(inputs['pixel_values'] ).logits

    assert hf_logits.shape == torch.Size([1, 1_000] )
    assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1e-3 )

    Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    __lowercase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swiftformer_name',
        default='swiftformer_xs',
        choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
        type=str,
        help='Name of the SwiftFormer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='./converted_outputs/',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')

    __lowercase : Tuple = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
27
1
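The conversion script above remaps checkpoint keys by building (old, new) pairs and popping each tensor back in under its new name. A compact sketch of that rename pattern in isolation (the keys and prefix below are toy examples, not SwiftFormer's real parameter names):

import torch

state_dict = {'patch_embed.0.weight': torch.zeros(2), 'network.1.2.fc.weight': torch.zeros(2)}
rename_pairs = [(k, 'swiftformer.' + k) for k in list(state_dict)]  # illustrative prefix only

for old_key, new_key in rename_pairs:
    state_dict[new_key] = state_dict.pop(old_key)  # move the tensor under its new key

print(sorted(state_dict))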
'''simple docstring'''
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = (DDIMParallelScheduler,)
    A_ = (("eta", 0.0), ("num_inference_steps", 50))

    def __UpperCAmelCase ( self , **__a ):
        '''simple docstring'''
        __a : Optional[Any] = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**__a )
        return config

    def __UpperCAmelCase ( self , **__a ):
        '''simple docstring'''
        __a : int = self.scheduler_classes[0]
        __a : List[Any] = self.get_scheduler_config(**__a )
        __a : Tuple = scheduler_class(**__a )

        __a , __a : Optional[Any] = 10, 0.0

        __a : List[str] = self.dummy_model()
        __a : Optional[int] = self.dummy_sample_deter

        scheduler.set_timesteps(__a )

        for t in scheduler.timesteps:
            __a : Dict = model(__a , __a )
            __a : Tuple = scheduler.step(__a , __a , __a , __a ).prev_sample

        return sample

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=__a )

        __a : Tuple = self.scheduler_classes[0]
        __a : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
        __a : Optional[int] = scheduler_class(**__a )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=__a , beta_end=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=__a )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__a ,
                    prediction_type=__a ,
                    sample_max_value=__a ,
                )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=__a , num_inference_steps=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=__a , eta=__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Optional[int] = self.scheduler_classes[0]
        __a : List[str] = self.get_scheduler_config()
        __a : List[Any] = scheduler_class(**__a )

        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Any = self.scheduler_classes[0]
        __a : Any = self.get_scheduler_config()
        __a : int = scheduler_class(**__a )

        __a , __a : int = 10, 0.0
        scheduler.set_timesteps(__a )

        __a : Optional[int] = self.dummy_model()
        __a : List[Any] = self.dummy_sample_deter
        __a : Union[str, Any] = self.dummy_sample_deter + 0.1
        __a : List[Any] = self.dummy_sample_deter - 0.1

        __a : Dict = samplea.shape[0]
        __a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
        __a : Dict = torch.arange(__a )[0:3, None].repeat(1 , __a )

        __a : Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        __a : Union[str, Any] = scheduler.batch_step_no_noise(__a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __a )

        __a : Any = torch.sum(torch.abs(__a ) )
        __a : Tuple = torch.mean(torch.abs(__a ) )

        assert abs(result_sum.item() - 1147.7904 ) < 1E-2
        assert abs(result_mean.item() - 0.4982 ) < 1E-3

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : int = self.full_loop()
        __a : Optional[Any] = torch.sum(torch.abs(__a ) )
        __a : str = torch.mean(torch.abs(__a ) )

        assert abs(result_sum.item() - 172.0067 ) < 1E-2
        assert abs(result_mean.item() - 0.223967 ) < 1E-3

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = self.full_loop(prediction_type='v_prediction' )
        __a : List[str] = torch.sum(torch.abs(__a ) )
        __a : str = torch.mean(torch.abs(__a ) )

        assert abs(result_sum.item() - 52.5302 ) < 1E-2
        assert abs(result_mean.item() - 0.0684 ) < 1E-3

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : List[str] = self.full_loop(set_alpha_to_one=__a , beta_start=0.01 )
        __a : List[str] = torch.sum(torch.abs(__a ) )
        __a : Union[str, Any] = torch.mean(torch.abs(__a ) )

        assert abs(result_sum.item() - 149.8295 ) < 1E-2
        assert abs(result_mean.item() - 0.1951 ) < 1E-3

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : List[str] = self.full_loop(set_alpha_to_one=__a , beta_start=0.01 )
        __a : Optional[Any] = torch.sum(torch.abs(__a ) )
        __a : List[str] = torch.mean(torch.abs(__a ) )

        assert abs(result_sum.item() - 149.0784 ) < 1E-2
        assert abs(result_mean.item() - 0.1941 ) < 1E-3
27
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


__lowercase : Dict = logging.get_logger(__name__)

__lowercase : Optional[Any] = {
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "umt5"
    A_ = ["past_key_values"]

    def __init__( self , __a=25_0112 , __a=512 , __a=64 , __a=1024 , __a=8 , __a=None , __a=6 , __a=32 , __a=128 , __a=0.1 , __a=1E-6 , __a=1.0 , __a="gated-gelu" , __a=True , __a=True , __a="T5Tokenizer" , __a=True , __a=0 , __a=1 , __a=0 , **__a , ):
        '''simple docstring'''
        super().__init__(
            is_encoder_decoder=__a , tokenizer_class=__a , tie_word_embeddings=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
        __a : Any = vocab_size
        __a : Any = d_model
        __a : str = d_kv
        __a : Dict = d_ff
        __a : Union[str, Any] = num_layers
        __a : int = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        __a : Optional[int] = num_heads
        __a : Tuple = relative_attention_num_buckets
        __a : Optional[Any] = relative_attention_max_distance
        __a : Optional[int] = dropout_rate
        __a : List[Any] = layer_norm_epsilon
        __a : int = initializer_factor
        __a : Union[str, Any] = feed_forward_proj
        __a : Any = use_cache

        __a : List[Any] = self.feed_forward_proj.split('-' )
        __a : Dict = act_info[-1]
        __a : Dict = act_info[0] == 'gated'

        if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\''
            )

        if feed_forward_proj == "gated-gelu":
            __a : Optional[int] = 'gelu_new'

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return self.d_model

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return self.num_heads

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return self.num_layers


class __UpperCamelCase ( lowerCAmelCase_ ):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            __a : Dict = 'past_encoder_sequence + sequence'
            __a : Tuple = {0: 'batch'}
            __a : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            __a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
            __a : int = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(__a , direction='inputs' )

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return 13

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return 5E-4
27
1
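The UMT5 config above derives its activation setup by splitting `feed_forward_proj` on '-': the prefix marks gating and the suffix names the base activation. A hedged standalone sketch of that parsing (the function name is illustrative, not part of the library):

def parse_feed_forward_proj(name: str):
    parts = name.split('-')
    is_gated = parts[0] == 'gated'
    act_fn = parts[-1]
    if name == 'gated-gelu':
        act_fn = 'gelu_new'  # mirrors the special case in the config above
    return is_gated, act_fn

print(parse_feed_forward_proj('gated-gelu'))  # (True, 'gelu_new')
print(parse_feed_forward_proj('relu'))  # (False, 'relu')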
'''simple docstring''' import argparse import os import re __lowercase : Optional[int] = 'src/diffusers' # Pattern that looks at the indentation in a line. __lowercase : int = re.compile(R'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. __lowercase : str = re.compile(R'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __lowercase : Optional[Any] = re.compile(R'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. __lowercase : List[Any] = re.compile(R'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __lowercase : Optional[int] = re.compile(R'\[([^\]]+)\]') def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Optional[int] = _re_indent.search(_SCREAMING_SNAKE_CASE ) return "" if search is None else search.groups()[0] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]="" , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Dict=None ): __a : Optional[Any] = 0 __a : str = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_SCREAMING_SNAKE_CASE ): index += 1 __a : List[Any] = ['\n'.join(lines[:index] )] else: __a : List[str] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). __a : Dict = [lines[index]] index += 1 while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) ) if index < len(_SCREAMING_SNAKE_CASE ) - 1: __a : Any = [lines[index + 1]] index += 1 else: __a : Tuple = [] else: blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) ) __a : Union[str, Any] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_SCREAMING_SNAKE_CASE ) > 0: blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): def _inner(_SCREAMING_SNAKE_CASE : Optional[int] ): return key(_SCREAMING_SNAKE_CASE ).lower().replace('_' , '' ) return _inner def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): # If no key is provided, we use a noop. def noop(_SCREAMING_SNAKE_CASE : int ): return x if key is None: __a : Union[str, Any] = noop # Constants are all uppercase, they go first. __a : Any = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __a : List[str] = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE )[0].isupper() and not key(_SCREAMING_SNAKE_CASE ).isupper()] # Functions begin with a lowercase, they go last. 
__a : Optional[Any] = [obj for obj in objects if not key(_SCREAMING_SNAKE_CASE )[0].isupper()] __a : Any = ignore_underscore(_SCREAMING_SNAKE_CASE ) return sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): # This inner function sort imports between [ ]. def _replace(_SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[int] = match.groups()[0] if "," not in imports: return F"""[{imports}]""" __a : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __a : List[Any] = keys[:-1] return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) + "]" __a : List[Any] = import_statement.split('\n' ) if len(_SCREAMING_SNAKE_CASE ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __a : List[str] = 2 if lines[1].strip() == '[' else 1 __a : int = [(i, _re_strip_line.search(_SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __a : str = sort_objects(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) __a : str = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_SCREAMING_SNAKE_CASE ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __a : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] ) else: __a : str = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __a : List[Any] = keys[:-1] __a : str = get_indent(lines[1] ) + ', '.join([F"""\"{k}\"""" for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) return "\n".join(_SCREAMING_SNAKE_CASE ) else: # Finally we have to deal with imports fitting on one line __a : Tuple = _re_bracket_content.sub(_replace , _SCREAMING_SNAKE_CASE ) return import_statement def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any]=True ): with open(_SCREAMING_SNAKE_CASE , 'r' ) as f: __a : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __a : Optional[int] = split_code_in_indented_blocks( _SCREAMING_SNAKE_CASE , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __a : List[Any] = main_blocks[block_idx] __a : Any = block.split('\n' ) # Get to the start of the imports. __a : Dict = 0 while line_idx < len(_SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __a : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) else: line_idx += 1 if line_idx >= len(_SCREAMING_SNAKE_CASE ): continue # Ignore beginning and last line: they don't contain anything. 
__a : Any = '\n'.join(block_lines[line_idx:-1] ) __a : Optional[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __a : int = split_code_in_indented_blocks(_SCREAMING_SNAKE_CASE , indent_level=_SCREAMING_SNAKE_CASE ) # We have two categories of import key: list or _import_structure[key].append/extend __a : Optional[Any] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __a : Union[str, Any] = [(pattern.search(_SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(_SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __a : Optional[Any] = [(i, key) for i, key in enumerate(_SCREAMING_SNAKE_CASE ) if key is not None] __a : Tuple = [x[0] for x in sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __a : List[str] = 0 __a : Any = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: __a : List[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_SCREAMING_SNAKE_CASE ) count += 1 # And we put our main block back together with its first and last line. __a : List[Any] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_SCREAMING_SNAKE_CASE ): if check_only: return True else: print(F"""Overwriting {file}.""" ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write('\n'.join(_SCREAMING_SNAKE_CASE ) ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any]=True ): __a : Optional[int] = [] for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ): if "__init__.py" in files: __a : int = sort_imports(os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' ) , check_only=_SCREAMING_SNAKE_CASE ) if result: __a : List[Any] = [os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' )] if len(_SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Would overwrite {len(_SCREAMING_SNAKE_CASE )} files, run `make style`.""" ) if __name__ == "__main__": __lowercase : str = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __lowercase : Union[str, Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
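To make the ordering rule concrete, here is a small self-contained sketch (hypothetical helper names, restating the three-bucket sort the script applies when rewriting `_import_structure`): constants first, then classes, then functions, each bucket ordered case-insensitively with underscores ignored.

# Hypothetical re-statement of the ordering implemented above; names are illustrative.
def _ignore_underscore(name):
    return name.lower().replace('_', '')

def sort_objects_sketch(objects):
    constants = [o for o in objects if o.isupper()]                        # e.g. UNET_CONFIG
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]   # e.g. UNet2DModel
    functions = [o for o in objects if not o[0].isupper()]                 # e.g. load_config
    return (
        sorted(constants, key=_ignore_underscore)
        + sorted(classes, key=_ignore_underscore)
        + sorted(functions, key=_ignore_underscore)
    )

print(sort_objects_sketch(['load_config', 'UNet2DModel', 'UNET_CONFIG', 'AutoPipeline']))
# ['UNET_CONFIG', 'AutoPipeline', 'UNet2DModel', 'load_config']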
'''simple docstring'''
import requests
from bs4 import BeautifulSoup  # the dump's `bsa` anonymizes the bs4 package name


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus"):
    # Pair each page heading with the counter that follows it; findAll preserves document order.
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covidaa_stats().items():
        print(f'''{key}\n{value}\n''')
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging __lowercase : Any = logging.get_logger(__name__) __lowercase : List[str] = { 'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': ( 'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "trajectory_transformer" A_ = ["past_key_values"] A_ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , __a=100 , __a=5 , __a=1 , __a=1 , __a=249 , __a=6 , __a=17 , __a=25 , __a=4 , __a=4 , __a=128 , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0006 , __a=512 , __a=0.02 , __a=1E-1_2 , __a=1 , __a=True , __a=1 , __a=5_0256 , __a=5_0256 , **__a , ): '''simple docstring''' __a : List[Any] = vocab_size __a : str = action_weight __a : List[str] = reward_weight __a : Tuple = value_weight __a : Union[str, Any] = max_position_embeddings __a : List[str] = block_size __a : Optional[int] = action_dim __a : List[Any] = observation_dim __a : Union[str, Any] = transition_dim __a : List[str] = learning_rate __a : str = n_layer __a : List[Any] = n_head __a : Optional[Any] = n_embd __a : int = embd_pdrop __a : Optional[Any] = attn_pdrop __a : Optional[Any] = resid_pdrop __a : List[Any] = initializer_range __a : Optional[int] = layer_norm_eps __a : Any = kaiming_initializer_range __a : str = use_cache super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
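A usage sketch, assuming the anonymized class above is the upstream `TrajectoryTransformerConfig` with a top-level export: the `attribute_map` lets the generic config names resolve to the GPT-style fields.

# Hypothetical usage; the class name and import path are assumptions.
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(n_embd=256, n_layer=6)
print(config.hidden_size)        # 256, routed through attribute_map
print(config.num_hidden_layers)  # 6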
'''simple docstring''' from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , 'embed_dim' ) ) self.parent.assertTrue(hasattr(__a , 'num_heads' ) ) class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ): '''simple docstring''' __a : str = parent __a : List[Any] = batch_size __a : Optional[int] = image_size __a : List[str] = patch_sizes __a : str = patch_stride __a : Any = patch_padding __a : Dict = is_training __a : Union[str, Any] = use_labels __a : Dict = num_labels __a : List[Any] = num_channels __a : Any = embed_dim __a : int = num_heads __a : Optional[int] = stride_kv __a : Dict = depth __a : List[str] = cls_token __a : List[Any] = attention_drop_rate __a : Tuple = initializer_range __a : int = layer_norm_eps def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Dict = None if self.use_labels: # create a random int32 tensor of given shape __a : str = ids_tensor([self.batch_size] , self.num_labels ) __a : str = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = TFCvtModel(config=__a ) __a : Dict = model(__a , training=__a ) __a : Any = (self.image_size, self.image_size) __a , __a : Dict = image_size[0], image_size[1] for i in range(len(self.depth ) ): __a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : List[Any] = self.num_labels __a : Optional[int] = TFCvtForImageClassification(__a ) __a : Dict = model(__a , labels=__a 
, training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : str = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A_ = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtModelTester(self ) __a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' 
, ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(__a ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) __a : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[Any] = [*signature.parameters.keys()] __a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' def check_hidden_states_output(__a , __a , __a ): __a : List[str] = model_class(__a ) __a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) ) __a : Any = outputs.hidden_states __a : Union[str, Any] = len(self.model_tester.depth ) self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[str] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Optional[Any] = True check_hidden_states_output(__a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = TFCvtModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __a : Tuple = self.default_image_processor __a : Any = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ) # forward pass __a : Any = model(**__a ) # verify the logits __a : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
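The spatial shapes asserted in `create_and_check_model` above come from the standard convolution output-size formula; a quick check with the tester's default patch sizes, strides, and paddings:

from math import floor

def conv_out(size, kernel, stride, padding):
    # floor((H + 2p - k) / s + 1), as computed in the model test above
    return floor((size + 2 * padding - kernel) / stride + 1)

size = 64  # the tester's default image_size
for kernel, stride, padding in [(7, 4, 2), (3, 2, 1), (3, 2, 1)]:
    size = conv_out(size, kernel, stride, padding)
    print(size)  # 16, then 8, then 4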
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase : int = logging.get_logger(__name__) __lowercase : Optional[Any] = { 'microsoft/trocr-base-handwritten': ( 'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "trocr" A_ = ["past_key_values"] A_ = { "num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model", "num_hidden_layers": "decoder_layers", } def __init__( self , __a=5_0265 , __a=1024 , __a=12 , __a=16 , __a=4096 , __a="gelu" , __a=512 , __a=0.1 , __a=0.0 , __a=0.0 , __a=2 , __a=0.02 , __a=0.0 , __a=True , __a=False , __a=True , __a=True , __a=1 , __a=0 , __a=2 , **__a , ): '''simple docstring''' __a : Union[str, Any] = vocab_size __a : Dict = d_model __a : Optional[Any] = decoder_layers __a : Tuple = decoder_attention_heads __a : int = decoder_ffn_dim __a : Union[str, Any] = activation_function __a : List[str] = max_position_embeddings __a : List[Any] = dropout __a : Union[str, Any] = attention_dropout __a : Optional[int] = activation_dropout __a : Optional[Any] = init_std __a : List[Any] = decoder_layerdrop __a : int = use_cache __a : List[Any] = scale_embedding __a : Any = use_learned_position_embeddings __a : List[str] = layernorm_embedding super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
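A usage sketch, assuming the anonymized class above is the upstream `TrOCRConfig`; as with the other configs, `attribute_map` aliases the generic names onto the decoder fields.

# Hypothetical usage; assumes the upstream class name and top-level export.
from transformers import TrOCRConfig

config = TrOCRConfig(d_model=512, decoder_layers=6)
print(config.hidden_size)        # 512
print(config.num_hidden_layers)  # 6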
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __lowercase : Union[str, Any] = logging.get_logger(__name__) __lowercase : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __lowercase : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): for attribute in key.split('.' ): __a : Any = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if weight_type is not None: __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape else: __a : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Tuple = value elif weight_type == "weight_g": __a : str = value elif weight_type == "weight_v": __a : Optional[Any] = value elif weight_type == "bias": __a : Union[str, Any] = value else: __a : List[Any] = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : int = [] __a : List[str] = fairseq_model.state_dict() __a : Tuple = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __a : int = None for name, value in fairseq_dict.items(): __a : List[str] = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) __a : List[str] = True elif name.split('.' )[0] == "proj": __a : Tuple = fairseq_model.proj __a : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __a : List[Any] = True if "*" in mapped_key: __a : str = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] __a : int = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: __a : List[Any] = 'weight_g' elif "weight_v" in name: __a : List[Any] = 'weight_v' elif "bias" in name: __a : Optional[Any] = 'bias' elif "weight" in name: __a : Tuple = 'weight' else: __a : Optional[Any] = None set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) return proj_weight def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : List[str] = full_name.split('conv_layers.' )[-1] __a : Any = name.split('.' ) __a : List[str] = int(items[0] ) __a : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __a : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a , __a : List[str] = emb.weight.shape __a : str = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) __a : Optional[int] = emb.weight.data return lin_layer def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: __a : Union[str, Any] = f.readlines() __a : Tuple = [line.split(' ' )[0] for line in lines] __a : int = len(_SCREAMING_SNAKE_CASE ) __a : List[Any] = { '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, } vocab_dict.update(dict(zip(_SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , ): __a : Optional[int] = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : Any = SpeechaTextaConfig.from_pretrained( _SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE , decoder_layers=_SCREAMING_SNAKE_CASE , do_stable_layer_norm=_SCREAMING_SNAKE_CASE ) __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) __a , __a , __a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __a : Optional[int] = model[0].eval() # set weights for wav2vec2 encoder __a : Tuple = WavaVecaModel(_SCREAMING_SNAKE_CASE ) __a : int = recursively_load_weights_wavaveca(model.encoder , _SCREAMING_SNAKE_CASE ) __a : Dict = SpeechaTextaForCausalLM(_SCREAMING_SNAKE_CASE ) __a , __a : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_SCREAMING_SNAKE_CASE ) # set output linear layer unexpected_keys.remove('embed_out' ) __a : Optional[Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) __a : Tuple = SpeechEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE ) __a : int = False # add projection layer __a : str = nn.Parameter(projection_layer.weight ) __a : Any = nn.Parameter(projection_layer.bias ) __a : str = create_vocab_dict(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = SpeechaTextaTokenizer(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = hf_wavavec.config.to_dict() __a 
: Tuple = tokenizer.pad_token_id __a : Optional[int] = tokenizer.bos_token_id __a : Union[str, Any] = tokenizer.eos_token_id __a : Tuple = 'speech_to_text_2' __a : Tuple = 'wav2vec2' __a : List[str] = SpeechEncoderDecoderConfig.from_dict(_SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __lowercase : Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
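A post-conversion smoke test might look like the sketch below; the dump folder path is a placeholder and the input is one second of silence, so this only checks that the exported encoder-decoder loads and generates.

# Hypothetical smoke test for a converted checkpoint; paths are placeholders.
import numpy as np
import torch
from transformers import SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

model = SpeechEncoderDecoderModel.from_pretrained('path/to/pytorch_dump_folder')
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('path/to/pytorch_dump_folder')

inputs = feature_extractor(np.zeros(16_000), sampling_rate=16_000, return_tensors='pt')
with torch.no_grad():
    generated_ids = model.generate(inputs.input_values)
print(generated_ids.shape)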
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
'''simple docstring''' # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int ): with open(_SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*_SCREAMING_SNAKE_CASE ) finally: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __lowercase : Dict = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) __lowercase : Tuple = torch.device('cuda', local_rank) __lowercase : Optional[int] = socket.gethostname() __lowercase : List[str] = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __lowercase : str = dist.get_rank() __lowercase : Union[str, Any] = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
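The anonymization above obscures what `printflock` locks on; a sketch of the usual pattern (locking the script's own file so prints from different ranks do not interleave):

import fcntl

def printflock_sketch(*args):
    # An exclusive flock serializes stdout across ranks; assumes the original
    # locks on the script's own file, i.e. open(__file__). Unix-only, like fcntl.
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*args)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)

printflock_sketch('hello from this rank')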
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = inspect.getfile(accelerate.test_utils ) __a : List[str] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 __a : Union[str, Any] = test_metrics @require_cpu def __UpperCAmelCase ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def __UpperCAmelCase ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def __UpperCAmelCase ( self ): '''simple docstring''' self.test_metrics.main() @require_multi_gpu def __UpperCAmelCase ( self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__a , env=os.environ.copy() )
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase : str = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] __lowercase : Tuple = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] __lowercase : Dict = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): __lowercase : Optional[Any] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
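An illustration of what the `_LazyModule` wiring above buys (real upstream names assumed; identifiers in this dump are anonymized): importing the package stays cheap, and the heavy modeling submodule is imported only on first attribute access.

import transformers  # fast: only the import structure is registered

cfg_cls = transformers.Data2VecAudioConfig  # the real submodule loads here
print(cfg_cls.model_type)  # 'data2vec-audio'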
'''simple docstring''' from importlib import import_module from .logging import get_logger __lowercase : Tuple = get_logger(__name__) class __UpperCamelCase : def __init__( self , __a , __a=None ): '''simple docstring''' __a : List[Any] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self , __a , getattr(__a , __a ) ) __a : Dict = module._original_module if isinstance(__a , _PatchedModuleObj ) else module class __UpperCamelCase : A_ = [] def __init__( self , __a , __a , __a , __a=None ): '''simple docstring''' __a : int = obj __a : Union[str, Any] = target __a : int = new __a : Dict = target.split('.' )[0] __a : Any = {} __a : Any = attrs or [] def __enter__( self ): '''simple docstring''' *__a , __a : Dict = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__a ) ): try: __a : str = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __a : Optional[int] = getattr(self.obj , __a ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__a , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __a : List[str] = obj_attr # patch at top level setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs ) ) __a : List[str] = getattr(self.obj , __a ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a ) , attrs=self.attrs ) ) __a : str = getattr(__a , __a ) # finally set the target attribute setattr(__a , __a , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __a : Any = getattr(import_module('.'.join(__a ) ) , __a ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , __a ) is attr_value: __a : List[Any] = getattr(self.obj , __a ) setattr(self.obj , __a , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __a : str = globals()['__builtins__'][target_attr] setattr(self.obj , __a , self.new ) else: raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self , *__a ): '''simple docstring''' for attr in list(self.original ): setattr(self.obj , __a , self.original.pop(__a ) ) def __UpperCAmelCase ( self ): '''simple docstring''' self.__enter__() self._active_patches.append(self ) def __UpperCAmelCase ( self ): '''simple docstring''' try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
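A usage sketch, assuming this file is `datasets.utils.patching` and the second class is its `patch_submodule`: the context manager swaps the `os.path.join` that a target module sees, then restores it on exit.

# Hypothetical usage; the import path and argument form are assumptions.
import datasets.load
from datasets.utils.patching import patch_submodule

def fake_join(*parts):
    return '<patched>/' + '/'.join(parts)

with patch_submodule(datasets.load, 'os.path.join', fake_join):
    pass  # code in datasets.load that calls os.path.join now gets fake_join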
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowercase : Tuple = pytest.mark.integration __lowercase : Optional[int] = {'comet'} __lowercase : List[str] = importlib.util.find_spec('fairseq') is not None __lowercase : str = {'code_eval'} __lowercase : List[Any] = os.name == 'nt' __lowercase : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'} __lowercase : Optional[Any] = importlib.util.find_spec('transformers') is not None def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : int , _SCREAMING_SNAKE_CASE : List[Any] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (): __a : List[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @local class __UpperCamelCase ( parameterized.TestCase ): A_ = {} A_ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = '[...]' __a : Tuple = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) __a : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__a ) # check parameters __a : Dict = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__a , metric_module.__name__ ): with self.use_local_metrics(): try: __a : str = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = '[...]' __a : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) # run doctest with self.use_local_metrics(): __a : List[Any] = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) 
self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ): yield else: yield @contextmanager def __UpperCAmelCase ( self ): '''simple docstring''' def load_local_metric(__a , *__a , **__a ): return load_metric(os.path.join('metrics' , __a ) , *__a , **__a ) with patch('datasets.load_metric' ) as mock_load_metric: __a : Dict = load_local_metric yield @classmethod def __UpperCAmelCase ( cls , __a ): '''simple docstring''' def wrapper(__a ): __a : Optional[Any] = contextmanager(__a ) __a : str = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: __a : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): import torch def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Optional[int] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: __a : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Optional[int] ): class __UpperCamelCase : def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' assert len(__a ) == 2 __a : Dict = [0.19, 0.92] return scores, sum(__a ) / len(__a ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: __a : str = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: __a : int = load_from_checkpoint yield def lowerCamelCase (): __a : Optional[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) ) __a : List[str] = 'ERROR' __a : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowercase : Optional[int] = 16 __lowercase : List[str] = 32 def lowerCamelCase (_SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ): __a : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' ) __a : Optional[Any] = load_dataset('glue' , 'mrpc' ) def tokenize_function(_SCREAMING_SNAKE_CASE : Optional[int] ): # max_length=None => use the model max length (it's actually the default) __a : str = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __a : List[Any] = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __a : List[str] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_SCREAMING_SNAKE_CASE : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. __a : int = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __a : List[Any] = 16 elif accelerator.mixed_precision != "no": __a : Any = 8 else: __a : Optional[int] = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='longest' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) # Instantiate dataloaders. 
__a : Union[str, Any] = DataLoader( tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) __a : List[str] = DataLoader( tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __lowercase : Tuple = mocked_dataloaders # noqa: F811 def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict ): # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , _SCREAMING_SNAKE_CASE ) == "1": __a : Optional[int] = 2 # New Code # __a : Union[str, Any] = int(args.gradient_accumulation_steps ) __a : Dict = int(args.local_sgd_steps ) # Initialize accelerator __a : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_SCREAMING_SNAKE_CASE ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __a : Dict = config['lr'] __a : Any = int(config['num_epochs'] ) __a : int = int(config['seed'] ) __a : List[str] = int(config['batch_size'] ) __a : Any = evaluate.load('glue' , 'mrpc' ) set_seed(_SCREAMING_SNAKE_CASE ) __a , __a : int = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __a : List[str] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __a : Dict = model.to(accelerator.device ) # Instantiate optimizer __a : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) # Instantiate scheduler __a : Any = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __a , __a , __a , __a , __a : Any = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() with LocalSGD( accelerator=_SCREAMING_SNAKE_CASE , model=_SCREAMING_SNAKE_CASE , local_sgd_steps=_SCREAMING_SNAKE_CASE , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ): __a : List[Any] = model(**_SCREAMING_SNAKE_CASE ) __a : int = output.loss accelerator.backward(_SCREAMING_SNAKE_CASE ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __a : Dict = model(**_SCREAMING_SNAKE_CASE ) __a : Tuple = outputs.logits.argmax(dim=-1 ) __a , __a : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) __a : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : Optional[Any] = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=_SCREAMING_SNAKE_CASE , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , ) parser.add_argument( '--local_sgd_steps' , type=_SCREAMING_SNAKE_CASE , default=8 , help='Number of local SGD steps or None to disable local SGD' ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) __a : Optional[Any] = parser.parse_args() __a : str = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
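A minimal sketch of the `LocalSGD` contract used above: `local_sgd.step()` counts optimizer steps and triggers a parameter sync every `local_sgd_steps` calls. A single process is shown only to illustrate the call pattern; the averaging is a no-op until there are multiple workers.

# Sketch assuming LocalSGD accepts these keywords, as in the script above;
# the toy model and shapes are illustrative.
import torch
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for _ in range(16):
        loss = model(torch.randn(3, 4)).pow(2).mean()
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        local_sgd.step()  # syncs parameters across workers on every 8th call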
'''simple docstring''' import re import string import numpy as np import datasets __lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __lowercase : Any = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def 
__UpperCAmelCase ( self , __a , __a , __a=None , __a=False , __a=False , __a=False , ): '''simple docstring''' if regexes_to_ignore is not None: for s in regexes_to_ignore: __a : Tuple = np.array([re.sub(__a , '' , __a ) for x in predictions] ) __a : List[Any] = np.array([re.sub(__a , '' , __a ) for x in references] ) else: __a : int = np.asarray(__a ) __a : str = np.asarray(__a ) if ignore_case: __a : Dict = np.char.lower(__a ) __a : List[str] = np.char.lower(__a ) if ignore_punctuation: __a : Dict = string.punctuation.maketrans('' , '' , string.punctuation ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Dict = np.char.translate(__a , table=__a ) if ignore_numbers: __a : Optional[int] = string.digits.maketrans('' , '' , string.digits ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Optional[int] = np.char.translate(__a , table=__a ) __a : Any = predictions == references return {"exact_match": np.mean(__a ) * 100}
'''simple docstring''' import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any]=1_024 , _SCREAMING_SNAKE_CASE : Optional[int]=1_024 , _SCREAMING_SNAKE_CASE : Tuple=False , **_SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : Tuple = SeqaSeqDataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , type_path='train' , **_SCREAMING_SNAKE_CASE ) __a : int = tok.pad_token_id def get_lens(_SCREAMING_SNAKE_CASE : str ): __a : List[Any] = tqdm( DataLoader(_SCREAMING_SNAKE_CASE , batch_size=512 , num_workers=8 , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) __a : Dict = [] for batch in dl: __a : Optional[int] = batch['input_ids'].ne(_SCREAMING_SNAKE_CASE ).sum(1 ).tolist() __a : Any = batch['labels'].ne(_SCREAMING_SNAKE_CASE ).sum(1 ).tolist() if consider_target: for src, tgt in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): max_lens.append(max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) else: max_lens.extend(_SCREAMING_SNAKE_CASE ) return max_lens __a : Optional[int] = get_lens(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = SeqaSeqDataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , type_path='val' , **_SCREAMING_SNAKE_CASE ) __a : int = get_lens(_SCREAMING_SNAKE_CASE ) pickle_save(_SCREAMING_SNAKE_CASE , train_ds.len_file ) pickle_save(_SCREAMING_SNAKE_CASE , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
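The `.ne(pad_token_id).sum(1)` idiom inside `get_lens` is the whole trick; a self-contained sketch of it (the token ids below are made up for illustration):

import torch

pad_token_id = 0
input_ids = torch.tensor([
    [101, 42, 7, 102, 0, 0],  # 4 real tokens, 2 pads
    [101, 9, 102, 0, 0, 0],   # 3 real tokens, 3 pads
])
# Count non-pad tokens per row, exactly as get_lens does for each batch.
lengths = input_ids.ne(pad_token_id).sum(1).tolist()
print(lengths)  # [4, 3]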
'''simple docstring''' import os import sys __lowercase : List[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowercase : int = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[Any] ): return AutoConfig.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoTokenizer.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any ): return AutoTokenizer.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModel.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Union[str, Any] ): return AutoModel.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[int] ): return AutoModelForCausalLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[Any] ): return AutoModelForMaskedLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Any ): return AutoModelForSequenceClassification.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : List[str] ): return AutoModelForQuestionAnswering.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
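Since each wrapper simply forwards to the corresponding Auto* class, usage is identical to calling transformers directly; a hedged sketch (the checkpoint name is only an example):

from transformers import AutoConfig, AutoTokenizer

config = AutoConfig.from_pretrained('distilbert-base-uncased')
tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
print(config.model_type)                      # 'distilbert'
print(tokenizer('hello world')['input_ids'])  # token ids incl. special tokens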
'''simple docstring''' from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "isbn/0140328726" ): __a : List[str] = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: __a : Tuple = F"""{olid} is not a valid Open Library olid""" raise ValueError(_SCREAMING_SNAKE_CASE ) return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json() def lowerCamelCase (_SCREAMING_SNAKE_CASE : dict ): __a : List[str] = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } __a : Tuple = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __a : Union[str, Any] = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] __a : Any = data['First sentence']['value'] for key, value in data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : str = ', '.join(_SCREAMING_SNAKE_CASE ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: __lowercase : Optional[int] = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(f'''\nSearching Open Library for ISBN: {isbn}...\n''') try: __lowercase : List[Any] = summarize_book(get_openlibrary_data(f'''isbn/{isbn}''')) print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'''Sorry, there are no results for ISBN: {isbn}.''')
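The interactive loop above only checks the length of the ISBN and that every character is a digit; a stricter gate could verify the ISBN-10 checksum. A minimal sketch under that assumption (`is_valid_isbn10` is a hypothetical helper, not part of the script):

def is_valid_isbn10(isbn: str) -> bool:
    # Standard mod-11 rule: sum(digit_i * (10 - i)) must be divisible by 11.
    # The last character may be 'X', which stands for the value 10.
    if len(isbn) != 10:
        return False
    total = 0
    for i, ch in enumerate(isbn):
        if ch.isdigit():
            value = int(ch)
        elif ch.upper() == 'X' and i == 9:
            value = 10
        else:
            return False
        total += value * (10 - i)
    return total % 11 == 0

print(is_valid_isbn10('0140328726'))  # True - the module's default example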
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = inspect.getfile(accelerate.test_utils ) __a : List[str] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 __a : Union[str, Any] = test_metrics @require_cpu def __UpperCAmelCase ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def __UpperCAmelCase ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def __UpperCAmelCase ( self ): '''simple docstring''' self.test_metrics.main() @require_multi_gpu def __UpperCAmelCase ( self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__a , env=os.environ.copy() )
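`debug_launcher` is what lets the first two tests above exercise the distributed code path on CPU alone; a hedged sketch of calling it directly (attribute names assume a reasonably recent accelerate release):

from accelerate import debug_launcher
from accelerate.state import PartialState

def worker():
    # Each launched worker can inspect the distributed state it was given.
    state = PartialState()
    print(f'process {state.process_index} of {state.num_processes}')

debug_launcher(worker, num_processes=2)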
'''simple docstring''' import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __lowercase : List[str] = datasets.logging.get_logger(__name__) __lowercase : List[str] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n' __lowercase : Dict = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n' __lowercase : Optional[int] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n' __lowercase : str = { 'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip', 'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip', 'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip', 'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip', 'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip', 'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip', 'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip', 'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip', 'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip', 'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config_name == "default": logger.warning( 'Using default BLEURT-Base checkpoint for sequence maximum length 128. ' 'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' 
) __a : int = 'bleurt-base-128' if self.config_name.lower() in CHECKPOINT_URLS: __a : str = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: __a : List[str] = self.config_name.upper() else: raise KeyError( f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer __a : Optional[int] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) __a : Optional[int] = score.BleurtScorer(os.path.join(__a , __a ) ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' __a : Union[str, Any] = self.scorer.score(references=__a , candidates=__a ) return {"scores": scores}
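The case normalization above amounts to trying the lower- and upper-cased spellings of the config name against the checkpoint table; the same idea as a standalone function (the trimmed-down table and function name are illustrative):

CHECKPOINT_URLS = {
    'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
    'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}

def resolve_checkpoint(name: str) -> str:
    # Mirror the precedence used above: lowercase first, then uppercase.
    for candidate in (name.lower(), name.upper()):
        if candidate in CHECKPOINT_URLS:
            return candidate
    raise KeyError(f'{name} model not found; choose one of {list(CHECKPOINT_URLS)}')

print(resolve_checkpoint('Bleurt-Tiny-128'))  # 'bleurt-tiny-128'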
'''simple docstring'''
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
    __a : Optional[Any] = tmp_path / 'file.csv'
    __a : Union[str, Any] = textwrap.dedent(
        '\\n header1,header2\n 1,2\n 10,20\n ' )
    with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
        f.write(_SCREAMING_SNAKE_CASE )
    return str(_SCREAMING_SNAKE_CASE )


@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
    __a : str = tmp_path / 'malformed_file.csv'
    __a : int = textwrap.dedent(
        '\\n header1,header2\n 1,2\n 10,20,\n ' )
    with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
        f.write(_SCREAMING_SNAKE_CASE )
    return str(_SCREAMING_SNAKE_CASE )


@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
    __a : Optional[Any] = tmp_path / 'csv_with_image.csv'
    __a : Dict = textwrap.dedent(
        F"""\ image {image_file} """ )
    with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
        f.write(_SCREAMING_SNAKE_CASE )
    return str(_SCREAMING_SNAKE_CASE )


@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
    __a : Union[str, Any] = tmp_path / 'csv_with_label.csv'
    __a : Any = textwrap.dedent(
        '\\n label\n good\n bad\n good\n ' )
    with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
        f.write(_SCREAMING_SNAKE_CASE )
    return str(_SCREAMING_SNAKE_CASE )


@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
    __a : Dict = tmp_path / 'csv_with_int_list.csv'
    __a : Tuple = textwrap.dedent(
        '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
    with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
        f.write(_SCREAMING_SNAKE_CASE )
    return str(_SCREAMING_SNAKE_CASE )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
    __a : int = Csv()
    __a : str = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(_SCREAMING_SNAKE_CASE , match='Error tokenizing data' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == 'ERROR'
        and 'Failed to read file' in record.message
        and os.path.basename(_SCREAMING_SNAKE_CASE ) in record.message
        for record in caplog.records )


@require_pil
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
    with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
        __a : Tuple = f.read().splitlines()[1]
    __a : Tuple = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
    __a : Any = csv._generate_tables([[csv_file_with_image]] )
    __a : int = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('image' ).type == Image()()
    __a : Any = pa_table.to_pydict()['image']
    assert generated_content == [{"path": image_file, "bytes": None}]


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
    with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
        __a : Tuple = f.read().splitlines()[1:]
    __a : Optional[int] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
    __a : List[str] = csv._generate_tables([[csv_file_with_label]] )
    __a : Dict = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
    __a : int = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
    __a : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
    __a : Any = csv._generate_tables([[csv_file_with_int_list]] )
    __a : Any = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
    __a : Tuple = pa_table.to_pydict()['int_list']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
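The `converters` hook exercised by the last test is plain pandas `read_csv` machinery underneath; a hedged standalone sketch of the same parsing:

import io

import pandas as pd

csv_text = 'int_list\n1 2 3\n4 5 6\n'
df = pd.read_csv(io.StringIO(csv_text), converters={'int_list': lambda x: [int(i) for i in x.split()]})
print(df['int_list'].tolist())  # [[1, 2, 3], [4, 5, 6]]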
'''simple docstring'''
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


__lowercase : Optional[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : tuple , _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str]=False , ):
    output_path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , enable_onnx_checker=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
    else:
        export(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )


@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = False ):
    __a : Tuple = torch.float16 if fpaa else torch.float32
    if fpaa and torch.cuda.is_available():
        __a : Dict = 'cuda'
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        __a : Union[str, Any] = 'cpu'
    __a : str = Path(_SCREAMING_SNAKE_CASE )

    # VAE DECODER
    __a : Optional[Any] = AutoencoderKL.from_pretrained(model_path + '/vae' )
    __a : Union[str, Any] = vae_decoder.config.latent_channels
    # forward only through the decoder part
    __a : Any = vae_decoder.decode
    onnx_export(
        _SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , _SCREAMING_SNAKE_CASE , 25 , 25 ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    del vae_decoder


if __name__ == "__main__":
    __lowercase : int = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
    )
    parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--opset',
        default=14,
        type=int,
        help='The version of the ONNX operator set to use.',
    )
    parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')

    __lowercase : Tuple = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print('SD: Done: ONNX')
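Once exported, the decoder can be smoke-tested with onnxruntime; a hedged sketch (the `output/` prefix stands in for whatever `--output_path` was used, and the 1x4x25x25 random latent just mirrors the dummy input of the export call above):

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('output/vae_decoder/model.onnx')
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)  # batch, latent channels, h, w
(sample,) = session.run(None, {'latent_sample': latent})
print(sample.shape)  # decoded image tensor, e.g. (1, 3, 200, 200) with an 8x upscaling VAE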
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Union[str, Any] = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Union[str, Any] = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
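A minimal sketch of the lazy-import idea behind `_LazyModule`, using a module-level `__getattr__` (PEP 562); the real helper also records submodule specs and honors `TYPE_CHECKING`, which is omitted here:

# mypkg/__init__.py - import heavy submodules only on first attribute access.
import importlib

_import_structure = {'modeling': ['BlenderbotModel']}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
    module = importlib.import_module(f'.{module_name}', __name__)
    return getattr(module, name)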
'''simple docstring''' import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any=False ): try: __a : Optional[int] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __a : Dict = default else: # KEY is set, convert it to True or False. try: __a : Any = strtobool(_SCREAMING_SNAKE_CASE ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value __lowercase : Tuple = parse_flag_from_env('RUN_SLOW', default=False) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): return unittest.skip('Test was skipped' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' 
)(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : int=None ): if test_case is None: return partial(_SCREAMING_SNAKE_CASE , version=_SCREAMING_SNAKE_CASE ) return unittest.skipUnless(is_torch_version('>=' , _SCREAMING_SNAKE_CASE ) , F"""test requires torch version >= {version}""" )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_SCREAMING_SNAKE_CASE ) __lowercase : Optional[Any] = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_SCREAMING_SNAKE_CASE ) class __UpperCamelCase ( unittest.TestCase ): A_ = True @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' __a : int = tempfile.mkdtemp() @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCAmelCase ( self ): '''simple docstring''' if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(__a ) class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Union[str, Any] = mocks if isinstance(__a , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): __a : str = AcceleratorState() __a : List[str] = tensor[None].clone().to(state.device ) __a : List[str] = gather(_SCREAMING_SNAKE_CASE ).cpu() __a : List[str] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _SCREAMING_SNAKE_CASE ): return False return True class __UpperCamelCase : def __init__( self , __a , __a , __a ): '''simple docstring''' __a : List[Any] = returncode __a : List[str] = stdout __a : Optional[Any] = stderr async def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ): while True: __a : Dict = await stream.readline() if line: callback(_SCREAMING_SNAKE_CASE ) else: break async def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=False , _SCREAMING_SNAKE_CASE : str=False ): if echo: print('\nRunning: ' , ' '.join(_SCREAMING_SNAKE_CASE ) ) __a : Dict = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_SCREAMING_SNAKE_CASE , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __a : Optional[Any] = [] __a : Union[str, Any] = [] def tee(_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict="" ): __a : Union[str, Any] = line.decode('utf-8' ).rstrip() sink.append(_SCREAMING_SNAKE_CASE ) if not quiet: print(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , file=_SCREAMING_SNAKE_CASE ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _SCREAMING_SNAKE_CASE : tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _SCREAMING_SNAKE_CASE : tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sys.stderr , label='stderr:' ) ) ), ] , timeout=_SCREAMING_SNAKE_CASE , ) return _RunOutput(await p.wait() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Dict=180 , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : int=True ): __a : Optional[int] = asyncio.get_event_loop() __a : Union[str, Any] = loop.run_until_complete( _stream_subprocess(_SCREAMING_SNAKE_CASE , env=_SCREAMING_SNAKE_CASE , stdin=_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE , echo=_SCREAMING_SNAKE_CASE ) ) __a : Tuple = ' '.join(_SCREAMING_SNAKE_CASE ) if result.returncode > 0: __a : Union[str, Any] = '\n'.join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) return result class __UpperCamelCase ( lowerCAmelCase_ ): pass def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any]=False ): try: __a : List[Any] = subprocess.check_output(_SCREAMING_SNAKE_CASE , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_SCREAMING_SNAKE_CASE , 'decode' ): __a : List[Any] = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"""Command `{" ".join(_SCREAMING_SNAKE_CASE )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
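All of the `require_*` helpers above share one shape: wrap `unittest.skipUnless` around a capability probe and apply it to the test case. A hedged sketch of writing your own in the same style (`require_bf16` is hypothetical, not part of accelerate):

import unittest

import torch

def require_bf16(test_case):
    # Skip unless the current CUDA device supports bfloat16.
    supported = torch.cuda.is_available() and torch.cuda.is_bf16_supported()
    return unittest.skipUnless(supported, 'test requires bf16 support')(test_case)

class MyTests(unittest.TestCase):
    @require_bf16
    def test_bf16_matmul(self):
        a = torch.randn(2, 2, dtype=torch.bfloat16, device='cuda')
        self.assertEqual((a @ a).dtype, torch.bfloat16)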
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase : def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ): '''simple docstring''' __a : Optional[Any] = parent __a : int = batch_size __a : Any = num_channels __a : Optional[int] = image_size __a : Dict = patch_size __a : int = is_training __a : Union[str, Any] = use_input_mask __a : Optional[int] = use_token_type_ids __a : Dict = use_labels __a : str = vocab_size __a : List[Any] = hidden_size __a : Union[str, Any] = num_hidden_layers __a : str = num_attention_heads __a : Union[str, Any] = intermediate_size __a : Any = hidden_act __a : List[str] = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : List[Any] = max_position_embeddings __a : Tuple = type_vocab_size __a : Any = type_sequence_label_size __a : Optional[int] = initializer_range __a : Any = coordinate_size __a : List[Any] = shape_size __a : Optional[int] = num_labels __a : Dict = num_choices __a : Union[str, Any] = scope __a : Union[str, Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __a : Optional[int] = text_seq_length __a : Any = (image_size // patch_size) ** 2 + 1 __a : Dict = self.text_seq_length + self.image_seq_length def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __a : Any = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __a : List[Any] = bbox[i, j, 3] __a : Tuple = bbox[i, j, 1] __a : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __a : int = bbox[i, j, 2] __a : Dict = bbox[i, j, 0] __a : int = tmp_coordinate __a : Optional[int] = tf.constant(__a ) __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : str = None if self.use_input_mask: __a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) __a : str = None if self.use_token_type_ids: __a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __a : Optional[Any] = None __a 
: Optional[int] = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __a : int = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = TFLayoutLMvaModel(config=__a ) # text + image __a : List[Any] = model(__a , pixel_values=__a , training=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , ) __a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __a : Any = model(__a , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __a : str = model({'pixel_values': pixel_values} , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = TFLayoutLMvaForSequenceClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : str = self.num_labels __a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[Any] = 2 __a : Any = TFLayoutLMvaForQuestionAnswering(config=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.prepare_config_and_inputs() ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs __a : Any = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf 
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) A_ = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) A_ = False A_ = False A_ = False def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ): '''simple docstring''' return True def __UpperCAmelCase ( self , __a , __a , __a=False ): '''simple docstring''' __a : str = copy.deepcopy(__a ) if model_class in get_values(__a ): __a : str = { k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__a , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__a ): __a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Union[str, Any] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = TFLayoutLMvaModelTester(self ) __a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) if getattr(__a , 'hf_compute_loss' , __a ): # The number of elements in the loss should be the same as the number of elements in the label __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0] ] __a : Dict = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : Dict = prepared_for_class.pop('input_ids' ) __a : Tuple = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: __a : Union[str, Any] = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __a : List[Any] = -100 __a : List[str] = tf.convert_to_tensor(__a ) __a : Any = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = model(__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model 
correctly compute the loss with a tuple __a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) # Get keys that were added with the _prepare_for_class function __a : Dict = prepared_for_class.keys() - inputs_dict.keys() __a : Any = inspect.signature(model.call ).parameters __a : str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __a : List[Any] = {0: 'input_ids'} for label_key in label_keys: __a : List[Any] = signature_names.index(__a ) __a : Union[str, Any] = label_key __a : List[str] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __a : Union[str, Any] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __a : Optional[Any] = prepared_for_class[value] __a : str = tuple(__a ) # Send to model __a : Tuple = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a : Any = type self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __a , __a , __a , __a , __a , __a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) __a : Tuple = self.default_image_processor __a : List[Any] = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values __a : Union[str, Any] = tf.constant([[1, 2]] ) __a : Optional[Any] = 
tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a ) # verify the logits __a : List[Any] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , __a ) __a : Optional[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
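The nested loop in `prepare_config_and_inputs` that makes every bbox legal (x0 <= x1 and y0 <= y1) can also be written vectorized; a hedged numpy sketch of the same invariant:

import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 1000, size=(2, 5, 4))  # (batch, seq, [x0, y0, x1, y1])

# Swap coordinates where needed so x0 <= x1 and y0 <= y1 hold everywhere.
x0 = np.minimum(bbox[..., 0], bbox[..., 2])
x1 = np.maximum(bbox[..., 0], bbox[..., 2])
y0 = np.minimum(bbox[..., 1], bbox[..., 3])
y1 = np.maximum(bbox[..., 1], bbox[..., 3])
legal_bbox = np.stack([x0, y0, x1, y1], axis=-1)

assert (legal_bbox[..., 2] >= legal_bbox[..., 0]).all()
assert (legal_bbox[..., 3] >= legal_bbox[..., 1]).all()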
'''simple docstring'''
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


__lowercase : int = logging.get_logger(__name__)


@add_end_docstrings(lowerCAmelCase_ )
class __UpperCamelCase ( lowerCAmelCase_ ):
    def __init__( self , **__a ):
        '''simple docstring'''
        super().__init__(**__a )
        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        # No specific FOR_XXX available yet

    def __call__( self , __a , **__a ):
        '''simple docstring'''
        return super().__call__(__a , **__a )

    def __UpperCAmelCase ( self , **__a ):
        '''simple docstring'''
        __a : List[str] = {}
        if "candidate_labels" in kwargs:
            __a : List[Any] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            __a : Optional[int] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def __UpperCAmelCase ( self , __a , __a=None , __a="This is a sound of {}." ):
        '''simple docstring'''
        if isinstance(__a , __a ):
            if audio.startswith('http://' ) or audio.startswith('https://' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                __a : Union[str, Any] = requests.get(__a ).content
            else:
                with open(__a , 'rb' ) as f:
                    __a : Tuple = f.read()

        if isinstance(__a , __a ):
            __a : Dict = ffmpeg_read(__a , self.feature_extractor.sampling_rate )

        if not isinstance(__a , np.ndarray ):
            raise ValueError('We expect a numpy ndarray as input' )
        if len(audio.shape ) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )

        __a : Dict = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
        __a : Tuple = candidate_labels
        __a : int = [hypothesis_template.format(x ) for x in candidate_labels]
        __a : Tuple = self.tokenizer(__a , return_tensors=self.framework , padding=__a )
        __a : Union[str, Any] = [text_inputs]
        return inputs

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        __a : Union[str, Any] = model_inputs.pop('candidate_labels' )
        __a : List[str] = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , __a ):
            __a : Dict = text_inputs[0]
        else:
            # Batching case.
            __a : Tuple = text_inputs[0][0]

        __a : Union[str, Any] = self.model(**__a , **__a )

        __a : Tuple = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        __a : Dict = model_outputs.pop('candidate_labels' )
        __a : Tuple = model_outputs['logits'][0]

        if self.framework == "pt":
            __a : Optional[Any] = logits.softmax(dim=0 )
            __a : List[Any] = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.' )

        __a : Union[str, Any] = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(__a , __a ) , key=lambda x : -x[0] )
        ]
        return result
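A hedged end-user sketch of the pipeline above through the high-level factory (the CLAP checkpoint is the commonly cited example model and its 48 kHz sampling rate is assumed; the silent array just stands in for a real recording):

import numpy as np
from transformers import pipeline

classifier = pipeline(task='zero-shot-audio-classification', model='laion/clap-htsat-unfused')
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
result = classifier(audio, candidate_labels=['dog barking', 'vacuum cleaner'])
print(result)  # [{'score': ..., 'label': ...}, ...] sorted by descending score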
'''simple docstring''' import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('.') def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): __a : Any = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ' F"""{test_file} instead.""" ) __a : Tuple = components[-1] if not test_fn.endswith('py' ): raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith('test_modeling_' ): raise ValueError( F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) __a : List[str] = components[:-1] + [test_fn.replace('.py' , '' )] __a : Optional[Any] = '.'.join(_SCREAMING_SNAKE_CASE ) return test_module_path def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): __a : List[str] = get_module_path(_SCREAMING_SNAKE_CASE ) __a : Dict = importlib.import_module(_SCREAMING_SNAKE_CASE ) return test_module def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): __a : List[str] = [] __a : List[str] = get_test_module(_SCREAMING_SNAKE_CASE ) for attr in dir(_SCREAMING_SNAKE_CASE ): if attr.endswith('ModelTester' ): tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): __a : Any = [] __a : str = get_test_module(_SCREAMING_SNAKE_CASE ) for attr in dir(_SCREAMING_SNAKE_CASE ): __a : int = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'all_model_classes' , [] ) if len(_SCREAMING_SNAKE_CASE ) > 0: test_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : str = get_test_classes(_SCREAMING_SNAKE_CASE ) __a : Any = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Tuple = test_class() if hasattr(_SCREAMING_SNAKE_CASE , 'setUp' ): test.setUp() __a : List[Any] = None if hasattr(_SCREAMING_SNAKE_CASE , 'model_tester' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __a : List[str] = test.model_tester.__class__ return model_tester def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : str = get_test_classes(_SCREAMING_SNAKE_CASE ) __a : int = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] ): __a : List[Any] = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Any = [] for test_class in test_classes: __a : Any = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) if tester_class is not None: tester_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): __a : str = get_test_classes(_SCREAMING_SNAKE_CASE ) __a : int = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes} return test_tester_mapping def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = { model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes } return model_test_mapping def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE ) __a : str = { model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes } return model_to_tester_mapping def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return o elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return o.__name__ elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ): return [to_json(_SCREAMING_SNAKE_CASE ) for x in o] elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()} else: return o
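The backbone of these helpers is a path-to-dotted-module conversion followed by `importlib`; a hedged standalone sketch (requires Python 3.9+ for `removesuffix`, and the repo root on `sys.path`):

import importlib
import os

def module_path_for(test_file: str) -> str:
    # tests/models/bert/test_modeling_bert.py -> tests.models.bert.test_modeling_bert
    components = test_file.split(os.path.sep)
    components[-1] = components[-1].removesuffix('.py')
    return '.'.join(components)

path = module_path_for(os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py'))
print(path)  # tests.models.bert.test_modeling_bert
# importlib.import_module(path) would then load the test module itself.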
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowercase : List[str] = logging.get_logger(__name__) __lowercase : Dict = { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json' ), 'distilbert-base-uncased-finetuned-sst-2-english': ( 'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json' ), } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "distilbert" A_ = { "hidden_size": "dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", } def __init__( self , __a=3_0522 , __a=512 , __a=False , __a=6 , __a=12 , __a=768 , __a=4 * 768 , __a=0.1 , __a=0.1 , __a="gelu" , __a=0.02 , __a=0.1 , __a=0.2 , __a=0 , **__a , ): '''simple docstring''' __a : Any = vocab_size __a : List[str] = max_position_embeddings __a : Optional[Any] = sinusoidal_pos_embds __a : int = n_layers __a : Optional[Any] = n_heads __a : Optional[int] = dim __a : Optional[int] = hidden_dim __a : Optional[Any] = dropout __a : Tuple = attention_dropout __a : Dict = activation __a : List[str] = initializer_range __a : Optional[Any] = qa_dropout __a : Optional[int] = seq_classif_dropout super().__init__(**__a , pad_token_id=__a ) class __UpperCamelCase ( lowerCAmelCase_ ): @property def __UpperCAmelCase ( self ): '''simple docstring''' if self.task == "multiple-choice": __a : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __a : Optional[int] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
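The alias table at the top of the config class is what transformers calls `attribute_map`: generic names resolve to DistilBERT's own parameter names. A hedged sketch of the effect (assumes transformers is installed; the values are arbitrary):

from transformers import DistilBertConfig

config = DistilBertConfig(n_layers=3, n_heads=6, dim=384)
# Generic aliases read through to the model-specific attributes:
print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)  # 3 6 384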
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = StableDiffusionInpaintPipeline A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A_ = frozenset([] ) def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) __a : str = PNDMScheduler(skip_prk_steps=__a ) torch.manual_seed(0 ) __a : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) __a : Dict = CLIPTextModel(__a ) __a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __a : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __UpperCAmelCase ( self , __a , __a=0 ): '''simple docstring''' __a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) __a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __a : Tuple = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) ) __a : Tuple = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) ) if str(__a ).startswith('mps' ): __a : Any = torch.manual_seed(__a ) else: __a : str = torch.Generator(device=__a ).manual_seed(__a ) __a : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : str = self.get_dummy_components() __a : Union[str, Any] = StableDiffusionInpaintPipeline(**__a ) __a : List[Any] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) __a : List[Any] = self.get_dummy_inputs(__a ) __a : Dict = 
sd_pipe(**__a ).images __a : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __a : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy' ) __a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting' __a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() __a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : Tuple = torch.manual_seed(0 ) __a : int = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , ) __a : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy' ) __a : str = 'stabilityai/stable-diffusion-2-inpainting' __a : List[str] = StableDiffusionInpaintPipeline.from_pretrained( __a , torch_dtype=torch.floataa , safety_checker=__a , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() __a : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : int = torch.manual_seed(0 ) __a : Optional[Any] = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , ) __a : int = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __UpperCAmelCase ( self ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : str = 'stabilityai/stable-diffusion-2-inpainting' __a : Any = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' ) __a : str = StableDiffusionInpaintPipeline.from_pretrained( __a , safety_checker=__a , scheduler=__a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) 
pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __a : str = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : Tuple = torch.manual_seed(0 ) __a : str = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='np' , ) __a : List[str] = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
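# --- Added usage sketch (not part of the test file) ---
# The slow tests above all exercise the same call pattern; a hedged,
# standalone version looks like this. Requires a CUDA GPU; the image file
# names are placeholders.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("init_image.png")  # placeholder path
mask_image = load_image("mask.png")        # placeholder path
result = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    generator=torch.manual_seed(0),
).images[0]
result.save("inpainted.png")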
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "naver-clova-ix/donut-base-finetuned-docvqa" A_ = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) A_ = "document_qa" A_ = AutoProcessor A_ = VisionEncoderDecoderModel A_ = ["image", "text"] A_ = ["text"] def __init__( self , *__a , **__a ): '''simple docstring''' if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*__a , **__a ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' __a : List[str] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' __a : Any = task_prompt.replace('{user_input}' , __a ) __a : Optional[Any] = self.pre_processor.tokenizer( __a , add_special_tokens=__a , return_tensors='pt' ).input_ids __a : Optional[int] = self.pre_processor(__a , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__a , ).sequences def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Union[str, Any] = self.pre_processor.batch_decode(__a )[0] __a : str = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) __a : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) __a : Any = re.sub(r'<.*?>' , '' , __a , count=1 ).strip() # remove first task start token __a : Tuple = self.pre_processor.tokenajson(__a ) return sequence["answer"]
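# --- Added usage sketch (not part of the original file) ---
# The same encode / generate / decode flow using the Donut checkpoint
# directly, via DonutProcessor instead of AutoProcessor. "document.png" is a
# placeholder path; the question is illustrative.
import re
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")

image = Image.open("document.png")  # placeholder
prompt = "<s_docvqa><s_question>What is the invoice total?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
pixel_values = processor(image, return_tensors="pt").pixel_values

outputs = model.generate(
    pixel_values,
    decoder_input_ids=decoder_input_ids,
    max_length=model.decoder.config.max_position_embeddings,
)
sequence = processor.batch_decode(outputs)[0]
sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "")
sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # drop the task start token
print(processor.token2json(sequence))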
"""Thin wrappers around the OpenWeatherMap REST API (https://openweathermap.org/api)."""
import requests

APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    """Current conditions for a city; `locals()` turns the arguments into query params."""
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    """Five-day forecast for a city."""
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """One Call data for a latitude/longitude pair."""
    return requests.get(URL_BASE + 'onecall', params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
'''simple docstring''' from __future__ import annotations def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : list[str] | None = None , _SCREAMING_SNAKE_CASE : dict[str, float] | None = None , _SCREAMING_SNAKE_CASE : bool = False , ): __a : str = cipher_alphabet or [chr(_SCREAMING_SNAKE_CASE ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) __a : int = { 'a': 0.0_8_4_9_7, 'b': 0.0_1_4_9_2, 'c': 0.0_2_2_0_2, 'd': 0.0_4_2_5_3, 'e': 0.1_1_1_6_2, 'f': 0.0_2_2_2_8, 'g': 0.0_2_0_1_5, 'h': 0.0_6_0_9_4, 'i': 0.0_7_5_4_6, 'j': 0.0_0_1_5_3, 'k': 0.0_1_2_9_2, 'l': 0.0_4_0_2_5, 'm': 0.0_2_4_0_6, 'n': 0.0_6_7_4_9, 'o': 0.0_7_5_0_7, 'p': 0.0_1_9_2_9, 'q': 0.0_0_0_9_5, 'r': 0.0_7_5_8_7, 's': 0.0_6_3_2_7, 't': 0.0_9_3_5_6, 'u': 0.0_2_7_5_8, 'v': 0.0_0_9_7_8, 'w': 0.0_2_5_6_0, 'x': 0.0_0_1_5_0, 'y': 0.0_1_9_9_4, 'z': 0.0_0_0_7_7, } else: # Custom frequencies dictionary __a : int = frequencies_dict if not case_sensitive: __a : Dict = ciphertext.lower() # Chi squared statistic values __a : dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(_SCREAMING_SNAKE_CASE ) ): __a : Optional[Any] = '' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet __a : Any = (alphabet_letters.index(letter.lower() ) - shift) % len( _SCREAMING_SNAKE_CASE ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter __a : Union[str, Any] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: __a : Any = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message __a : Optional[int] = decrypted_with_shift.lower().count(_SCREAMING_SNAKE_CASE ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __a : Optional[Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula __a : Optional[int] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message __a : Optional[Any] = decrypted_with_shift.count(_SCREAMING_SNAKE_CASE ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __a : Optional[int] = frequencies[letter] * occurrences # Complete the chi squared statistic formula __a : Tuple = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary __a : Dict = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_SCREAMING_SNAKE_CASE : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] __a : int = min( _SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE , ) # Get all the data from the most likely cipher (key, decoded message) ( ( __a ) , ( __a ) , ) : Optional[int] = chi_squared_statistic_values[most_likely_cipher] # Return 
the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
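# --- Added usage sketch (not part of the original file) ---
# The decryption function above appears under a placeholder identifier; it is
# assumed importable as `decrypt_caesar_with_chi_squared` (its descriptive
# name). "khoor zruog" is "hello world" Caesar-shifted by 3, so the most
# likely shift found should be 3.
shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("khoor zruog")
print(shift, round(chi_squared, 3), plaintext)  # expected: 3 ... 'hello world'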
"""Few-shot NER scoring model: BERT embeddings plus support-set attention."""
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_name="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_name, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token as a start/end token against the supports."""
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
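# --- Added shape-level sketch (not part of the original file) ---
# Exercises the attention helper above on random tensors; the shapes are
# illustrative only, and constructing the model downloads the
# sayef/fsner-bert-base-uncased weights on first use.
q_rep = torch.randn(2, 6, 10, 768)   # (batch, supports, tokens, hidden)
S_rep = torch.randn(2, 6, 10, 768)
fsner = FSNERModel()
att = fsner.Atten(q_rep, S_rep)      # softmax over dim 1 of cosine similarity
print(att.shape)                     # torch.Size([2, 6, 10])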
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): A_ = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , __a , __a , __a = None , __a = 5_0257 , __a = 1024 , __a = 768 , __a = 12 , __a = 12 , __a = None , __a = "gelu_new" , __a = 0.1 , __a = 0.1 , __a = 0.1 , __a = 1E-5 , __a = 0.02 , __a = True , __a = True , __a = False , __a = False , ): '''simple docstring''' super().__init__() __a : List[str] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) __a : Union[str, Any] = prefix_inner_dim __a : Any = prefix_hidden_dim __a : Any = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) __a : Union[str, Any] = ( nn.Linear(self.prefix_hidden_dim , __a ) if self.prefix_hidden_dim is not None else nn.Identity() ) __a : Dict = GPTaConfig( vocab_size=__a , n_positions=__a , n_embd=__a , n_layer=__a , n_head=__a , n_inner=__a , activation_function=__a , resid_pdrop=__a , embd_pdrop=__a , attn_pdrop=__a , layer_norm_epsilon=__a , initializer_range=__a , scale_attn_weights=__a , use_cache=__a , scale_attn_by_inverse_layer_idx=__a , reorder_and_upcast_attn=__a , ) __a : List[str] = GPTaLMHeadModel(__a ) def __UpperCAmelCase ( self , __a , __a , __a = None , __a = None , ): '''simple docstring''' __a : Union[str, Any] = self.transformer.transformer.wte(__a ) __a : Dict = self.encode_prefix(__a ) __a : str = self.decode_prefix(__a ) __a : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: __a : str = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) __a : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) __a : List[str] = self.transformer(inputs_embeds=__a , labels=__a , attention_mask=__a ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' return torch.zeros(__a , self.prefix_length , dtype=torch.intaa , device=__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.encode_prefix(__a ) @torch.no_grad() def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Tuple = torch.split(__a , 1 , dim=0 ) __a : Union[str, Any] = [] __a : Optional[Any] = [] for feature in features: __a : Optional[int] = self.decode_prefix(feature.to(__a ) ) # back to the clip feature # Only support beam search for now __a , __a : List[str] = self.generate_beam( input_embeds=__a , device=__a , eos_token_id=__a ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) __a : Optional[int] = torch.stack(__a ) __a : Optional[int] = torch.stack(__a ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __UpperCAmelCase ( self , __a=None , __a=None , __a=None , __a = 5 , __a = 67 , __a = 1.0 , __a = None , ): '''simple docstring''' __a : Any = eos_token_id __a : Any = None __a : Union[str, Any] = None __a : List[Any] = torch.ones(__a , device=__a , dtype=torch.int ) __a : 
Optional[Any] = torch.zeros(__a , device=__a , dtype=torch.bool ) if input_embeds is not None: __a : Optional[Any] = input_embeds else: __a : Union[str, Any] = self.transformer.transformer.wte(__a ) for i in range(__a ): __a : str = self.transformer(inputs_embeds=__a ) __a : int = outputs.logits __a : Optional[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __a : Optional[Any] = logits.softmax(-1 ).log() if scores is None: __a , __a : Dict = logits.topk(__a , -1 ) __a : Union[str, Any] = generated.expand(__a , *generated.shape[1:] ) __a , __a : List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: __a : Union[str, Any] = next_tokens else: __a : Any = tokens.expand(__a , *tokens.shape[1:] ) __a : Optional[int] = torch.cat((tokens, next_tokens) , dim=1 ) else: __a : Dict = -float(np.inf ) __a : Optional[int] = 0 __a : Tuple = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __a : List[Any] = scores_sum / seq_lengths[:, None] __a , __a : int = scores_sum_average.view(-1 ).topk(__a , -1 ) __a : Union[str, Any] = next_tokens // scores_sum.shape[1] __a : Tuple = seq_lengths[next_tokens_source] __a : Optional[Any] = next_tokens % scores_sum.shape[1] __a : Any = next_tokens.unsqueeze(1 ) __a : Tuple = tokens[next_tokens_source] __a : Tuple = torch.cat((tokens, next_tokens) , dim=1 ) __a : List[Any] = generated[next_tokens_source] __a : Union[str, Any] = scores_sum_average * seq_lengths __a : Union[str, Any] = is_stopped[next_tokens_source] __a : Any = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) __a : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 ) __a : List[str] = is_stopped + next_tokens.eq(__a ).squeeze() if is_stopped.all(): break __a : str = scores / seq_lengths __a : Dict = scores.argsort(descending=__a ) # tokens tensors are already padded to max_seq_length __a : Optional[Any] = [tokens[i] for i in order] __a : Tuple = torch.stack(__a , dim=0 ) __a : str = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
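# --- Added construction sketch (not part of the original file) ---
# Assumption: the class above corresponds to diffusers' UniDiffuserTextDecoder
# (a GPT-2 LM head behind learned prefix projections); the name and the sizes
# below are illustrative.
import torch

decoder = UniDiffuserTextDecoder(
    prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64
)
clip_features = torch.randn(1, 77, 768)        # e.g. CLIP text hidden states
hidden = decoder.encode_prefix(clip_features)  # project into the prefix space
print(hidden.shape)                            # torch.Size([1, 77, 64])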
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : int = int(number**0.5 ) return number == sq * sq def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): __a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __a : int = x_den * y_den * z_den __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) top //= hcf bottom //= hcf return top, bottom def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 35 ): __a : set = set() __a : int __a : Fraction = Fraction(0 ) __a : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 __a : Union[str, Any] = x_num * y_den + x_den * y_num __a : Optional[Any] = x_den * y_den __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 __a : Optional[int] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __a : Union[str, Any] = x_den * x_den * y_den * y_den if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): __a : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[Any] = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=-1 __a : int = x_num * y_num __a : Optional[Any] = x_den * y_num + x_num * y_den __a : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 __a : List[Any] = x_num * x_num * y_num * y_num __a : List[Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): __a : Optional[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Union[str, Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[str] = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) for num, den in unique_s: total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
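# --- Added worked check (not part of the original file) ---
# Sanity check of the reduction helper above (assumed importable as
# `add_three`; in this file it appears under a placeholder identifier):
# 1/2 + 1/3 + 1/6 = 1, so the reduced sum is (1, 1).
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)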
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowerCAmelCase_ ): A_ = ["torch", "transformers", "onnx"] def __init__( self , *__a , **__a ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCamelCase ( metaclass=lowerCAmelCase_ ): A_ = ["torch", "transformers", "onnx"] def __init__( self , *__a , **__a ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCamelCase ( metaclass=lowerCAmelCase_ ): A_ = ["torch", "transformers", "onnx"] def __init__( self , *__a , **__a ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCamelCase ( metaclass=lowerCAmelCase_ ): A_ = ["torch", "transformers", "onnx"] def __init__( self , *__a , **__a ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCamelCase ( metaclass=lowerCAmelCase_ ): A_ = ["torch", "transformers", "onnx"] def __init__( self , *__a , **__a ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCamelCase ( metaclass=lowerCAmelCase_ ): A_ = ["torch", "transformers", "onnx"] def __init__( self , *__a , **__a ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __UpperCAmelCase ( cls , *__a , **__a ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] )
'''simple docstring''' import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def __UpperCAmelCase ( self ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = ort.SessionOptions() __a : Dict = False return options def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/overture-creations-5sI6fQgYIuo.png' ) __a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' ) __a : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' ) # using the PNDM scheduler by default __a : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__a ) __a : Tuple = 'A red cat sitting on a park bench' __a : int = np.random.RandomState(0 ) __a : Tuple = pipe( prompt=__a , image=__a , mask_image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__a , output_type='np' , ) __a : Tuple = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
"""Dijkstra shortest path on a 0/1 grid (1 = walkable), with optional diagonals."""
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
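# --- Added usage sketch (not part of the original file) ---
# 1 marks walkable cells; the middle row blocks everything except its center.
grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
print(dist)  # 4.0
print(path)  # [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]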
'''simple docstring''' import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowercase : Dict = 16 __lowercase : List[Any] = 32 def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return int(x / 2**20 ) class __UpperCamelCase : def __enter__( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero __a : Optional[int] = torch.cuda.memory_allocated() return self def __exit__( self , *__a ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() __a : Dict = torch.cuda.memory_allocated() __a : List[Any] = torch.cuda.max_memory_allocated() __a : Tuple = bamb(self.end - self.begin ) __a : Tuple = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def lowerCamelCase (_SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : str = "bert-base-cased" , _SCREAMING_SNAKE_CASE : int = 320 , _SCREAMING_SNAKE_CASE : int = 160 , ): __a : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : List[Any] = load_dataset( 'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} ) def tokenize_function(_SCREAMING_SNAKE_CASE : Tuple ): # max_length=None => use the model max length (it's actually the default) __a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __a : List[str] = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __a : Tuple = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_SCREAMING_SNAKE_CASE : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
__a : int = DataLoader( tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) __a : Tuple = DataLoader( tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Initialize accelerator __a : str = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __a : Dict = config['lr'] __a : str = int(config['num_epochs'] ) __a : Optional[int] = int(config['seed'] ) __a : Any = int(config['batch_size'] ) __a : List[str] = args.model_name_or_path set_seed(_SCREAMING_SNAKE_CASE ) __a , __a : int = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) # Instantiate optimizer __a : Optional[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __a : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) if accelerator.state.deepspeed_plugin is not None: __a : int = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __a : Union[str, Any] = 1 __a : Tuple = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __a : str = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , ) else: __a : List[Any] = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__a , __a , __a , __a , __a : Optional[Any] = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over __a : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly __a : Dict = 0 # Now we train the model __a : str = {} for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): __a : List[Any] = model(**_SCREAMING_SNAKE_CASE ) __a : str = outputs.loss __a : str = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) __a : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_SCREAMING_SNAKE_CASE , ) parser.add_argument( '--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_SCREAMING_SNAKE_CASE , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_SCREAMING_SNAKE_CASE , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of train epochs.' , ) __a : List[str] = parser.parse_args() __a : List[Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
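# --- Added usage sketch (not part of the original file) ---
# The context manager above (named TorchTracemalloc in the upstream
# accelerate example; here it appears under a placeholder identifier) can
# wrap any CUDA workload. Requires a CUDA device.
import torch

with TorchTracemalloc() as tracemalloc:
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x
print(f"used: {tracemalloc.used} MB, peaked: {tracemalloc.peaked} MB")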
'''simple docstring''' import re import string import numpy as np import datasets __lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __lowercase : Any = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def 
__UpperCAmelCase ( self , __a , __a , __a=None , __a=False , __a=False , __a=False , ): '''simple docstring''' if regexes_to_ignore is not None: for s in regexes_to_ignore: __a : Tuple = np.array([re.sub(__a , '' , __a ) for x in predictions] ) __a : List[Any] = np.array([re.sub(__a , '' , __a ) for x in references] ) else: __a : int = np.asarray(__a ) __a : str = np.asarray(__a ) if ignore_case: __a : Dict = np.char.lower(__a ) __a : List[str] = np.char.lower(__a ) if ignore_punctuation: __a : Dict = string.punctuation.maketrans('' , '' , string.punctuation ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Dict = np.char.translate(__a , table=__a ) if ignore_numbers: __a : Optional[int] = string.digits.maketrans('' , '' , string.digits ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Optional[int] = np.char.translate(__a , table=__a ) __a : Any = predictions == references return {"exact_match": np.mean(__a ) * 100}
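# --- Added cross-check (not part of the metric file) ---
# With no normalization options set, the metric reduces to a plain
# elementwise string comparison, matching the 25.0 example in the docstring
# above.
import numpy as np

preds = np.array(["cat?", "theater", "yelling", "agent"])
refs = np.array(["the cat", "theater", "YELLING", "agent007"])
print(np.mean(preds == refs) * 100)  # 25.0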
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __lowercase : List[Any] = 'bart' __lowercase : Union[str, Any] = True @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): if LOAD_DENSE_INDEX: __a : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) __a : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) __a : Optional[int] = qar_model.eval() else: __a , __a : str = (None, None) if MODEL_TYPE == "bart": __a : Union[str, Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) __a : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) __a : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) __a : str = sas_model.eval() else: __a , __a : Tuple = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): if LOAD_DENSE_INDEX: __a : Optional[Any] = faiss.StandardGpuResources() __a : Dict = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train'] __a : int = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) __a : int = faiss.IndexFlatIP(128 ) __a : Any = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE ) wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU else: __a , __a : str = (None, None) __a : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : Dict = datasets.load_dataset('eli5' , name='LFQA_reddit' ) __a : Dict = elia['train_eli5'] __a : Optional[int] = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) ) __a : str = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_SCREAMING_SNAKE_CASE ) return (elia_train, eli5_train_q_index) __lowercase , __lowercase , __lowercase : Any = load_indexes() __lowercase , __lowercase , __lowercase , __lowercase : Dict = load_models() __lowercase , __lowercase : int = load_train_data() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str]=10 ): __a : Optional[int] = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a , __a : Union[str, Any] = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Any = [elia_train[int(_SCREAMING_SNAKE_CASE )] for i in I[0]] return nn_examples def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str="wiki40b" , _SCREAMING_SNAKE_CASE : List[str]="dense" , _SCREAMING_SNAKE_CASE : Any=10 ): if source == "none": __a , __a : Any = (' <P> '.join(['' for _ in range(11 )] ).strip(), []) else: if method == "dense": __a , __a : str = query_qa_dense_index( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a , __a : Union[str, Any] = query_es_index( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , ) __a : Dict = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] __a : Any = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None), } ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.9_5 , _SCREAMING_SNAKE_CASE : str=0.8 ): with torch.no_grad(): __a : Union[str, Any] = qa_sas_generate( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar __lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' __lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) __lowercase : Dict = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] __lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options') if demo_options: __lowercase : Any = st.sidebar.selectbox( '', action_list, index=3, ) __lowercase : Tuple = action_list.index(action_st) __lowercase : Tuple = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) __lowercase : List[Any] = show_type == 'Show full text of passages' else: __lowercase : int = 3 __lowercase : str = True __lowercase : Tuple = st.sidebar.checkbox('Retrieval options') if retrieval_options: __lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model 
which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) __lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) __lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: __lowercase : str = 'wiki40b' __lowercase : List[Any] = 'dense' __lowercase : Dict = 'beam' __lowercase : Optional[int] = 2 __lowercase : List[str] = 64 __lowercase : Tuple = 2_56 __lowercase : List[str] = None __lowercase : Tuple = None __lowercase : List[Any] = st.sidebar.checkbox('Generation options') if generate_options: __lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) __lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) __lowercase : Tuple = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) __lowercase : int = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": __lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __lowercase : Dict = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __lowercase : Union[str, Any] = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __lowercase : List[str] = None # start main text __lowercase : int = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] __lowercase : Optional[int] = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": __lowercase : Any = st.text_input('Enter your question here:', '') else: __lowercase : Any = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": __lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10) __lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10) __lowercase : Optional[int] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __lowercase : str = support_list[:10] __lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: __lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __lowercase , __lowercase : int = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): __lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) __lowercase : Any = res[1].strip() if sec_titles == "": __lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url) else: __lowercase : Union[str, Any] = sec_titles.split(' & ') __lowercase : str = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: __lowercase : str = find_nearest_training(question) __lowercase : Optional[int] = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) __lowercase : Any = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) __lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __lowercase : List[str] = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase_ ): A_ = ["input_values", "padding_mask"] def __init__( self , __a = 1 , __a = 2_4000 , __a = 0.0 , __a = None , __a = None , **__a , ): '''simple docstring''' super().__init__(feature_size=__a , sampling_rate=__a , padding_value=__a , **__a ) __a : Dict = chunk_length_s __a : Any = overlap @property def __UpperCAmelCase ( self ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __UpperCAmelCase ( self ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self , __a , __a = None , __a = False , __a = None , __a = None , __a = None , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs __a : Dict = True __a : Tuple = bool( isinstance(__a , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: __a : Tuple = [np.asarray(__a , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__a , np.ndarray ): __a : str = np.asarray(__a , dtype=np.floataa ) elif isinstance(__a , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): __a : List[Any] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: __a : str = [np.asarray(__a ).T] # verify inputs are valid for idx, example in enumerate(__a ): if example.ndim > 2: raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" ) __a : Dict = None __a : Any = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: __a : List[Any] = min(array.shape[0] for array in raw_audio ) __a : Any = int(np.floor(max_length / self.chunk_stride ) ) __a : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: __a : Optional[int] = max(array.shape[0] for array in raw_audio ) __a : List[str] = int(np.ceil(max_length / self.chunk_stride ) ) __a : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length __a : Optional[Any] = 'max_length' else: __a : Any = input_values # normal padding on batch if padded_inputs is None: __a : Tuple = self.pad( __a , max_length=__a , truncation=__a , padding=__a , return_attention_mask=__a , ) if padding: __a : Union[str, Any] = padded_inputs.pop('attention_mask' ) __a : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: __a : str = example[..., None] input_values.append(example.T ) __a : List[str] = input_values if return_tensors is not None: __a : str = padded_inputs.convert_to_tensors(__a ) return padded_inputs
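# --- Added usage sketch (not part of the original file) ---
# The extractor above mirrors the upstream EncodecFeatureExtractor; a minimal
# mono-audio call, with one second of random noise standing in for real
# audio.
import numpy as np
from transformers import EncodecFeatureExtractor

fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
audio = np.random.randn(24000).astype(np.float32)
inputs = fe(raw_audio=audio, sampling_rate=24000, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([1, 1, 24000])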
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowercase : Tuple = logging.get_logger(__name__) __lowercase : List[Any] = torch.device('cpu') def lowerCamelCase (): __a : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' __a : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : int = dct.pop(_SCREAMING_SNAKE_CASE ) __a : Tuple = val def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : Dict = [] for k in state_dict.keys(): __a : List[Any] = k if ".pwconv" in k: __a : List[Any] = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: __a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: __a : Optional[int] = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: __a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: __a : Union[str, Any] = k_new.split('.' ) if ls[2].isdigit(): __a : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: __a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : Union[str, Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __a : List[str] = 1_000 __a : Tuple = 'huggingface/label-files' __a : str = 'imagenet-1k-id2label.json' __a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : Any = idalabel __a : str = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __a : Dict = [3, 3, 6, 4] __a : int = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __a : Dict = [3, 3, 9, 6] __a : List[str] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __a : Dict = [4, 3, 10, 5] __a : Optional[int] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __a : Tuple = [4, 4, 12, 6] __a : Dict = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): __a : List[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE ) else: __a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) __a : Optional[Any] = checkpoint __a : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model __a : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs __a : Tuple = prepare_img() __a : str = ViTImageProcessor.from_pretrained('preprocessor_config' ) __a : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models __a : List[Any] = get_expected_output(_SCREAMING_SNAKE_CASE ) __a : Dict = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') __lowercase : Tuple = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
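# Example invocation of the conversion script above, built from its argparse
# flags (script name and paths are illustrative):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth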
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "char" A_ = "bpe" A_ = "wp" __lowercase : Dict = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __UpperCamelCase ( lowerCAmelCase_ ): A_ = ["image_processor", "char_tokenizer"] A_ = "ViTImageProcessor" A_ = "MgpstrTokenizer" def __init__( self , __a=None , __a=None , **__a ): '''simple docstring''' __a : List[str] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __a , ) __a : Any = kwargs.pop('feature_extractor' ) __a : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) __a : Any = tokenizer __a : int = AutoTokenizer.from_pretrained('gpt2' ) __a : List[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(__a , __a ) def __call__( self , __a=None , __a=None , __a=None , **__a ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: __a : List[str] = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None: __a : Optional[int] = self.char_tokenizer(__a , return_tensors=__a , **__a ) if text is None: return inputs elif images is None: return encodings else: __a : Dict = encodings['input_ids'] return inputs def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a , __a , __a : Optional[int] = sequences __a : List[Any] = char_preds.size(0 ) __a , __a : int = self._decode_helper(__a , 'char' ) __a , __a : List[str] = self._decode_helper(__a , 'bpe' ) __a , __a : Dict = self._decode_helper(__a , 'wp' ) __a : Dict = [] __a : Optional[Any] = [] for i in range(__a ): __a : Dict = [char_scores[i], bpe_scores[i], wp_scores[i]] __a : Union[str, Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] __a : int = scores.index(max(__a ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __a : Union[str, Any] = {} __a : Optional[Any] = final_strs __a : List[Any] = final_scores __a : Tuple = char_strs __a : Optional[int] = bpe_strs __a : Optional[Any] = wp_strs return out def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if format == DecodeType.CHARACTER: __a : str = self.char_decode __a : Tuple = 1 __a : Optional[int] = '[s]' elif format == DecodeType.BPE: __a : str = self.bpe_decode __a : Optional[int] = 2 __a : Tuple = '#' elif format == DecodeType.WORDPIECE: __a : List[str] = self.wp_decode __a : str = 102 __a : Any = '[SEP]' else: raise ValueError(f"""Format {format} is not supported.""" ) __a , __a : Tuple = [], [] __a : Union[str, Any] = pred_logits.size(0 ) __a : Union[str, Any] = pred_logits.size(1 ) __a , __a : Optional[Any] = pred_logits.topk(1 , dim=-1 , largest=__a , sorted=__a ) __a : str = preds_index.view(-1 , __a )[:, 1:] __a : Optional[int] = decoder(__a ) __a , __a : Tuple = torch.nn.functional.softmax(__a , dim=2 ).max(dim=2 ) __a : List[str] = preds_max_prob[:, 1:] for index in range(__a ): __a : int = preds_str[index].find(__a ) __a 
: Dict = preds_str[index][:pred_eos] __a : List[str] = preds_index[index].cpu().tolist() __a : List[Any] = pred_index.index(__a ) if eos_token in pred_index else -1 __a : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] __a : List[str] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__a ) conf_scores.append(__a ) return dec_strs, conf_scores def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Union[str, Any] = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(__a )] return decode_strs def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : str = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(__a )] return decode_strs
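# Self-contained sketch of the selection rule implemented in the decode method
# above: for each sample, keep the string from whichever head (char / bpe /
# wordpiece) produced the highest cumulative confidence. Values are made up.
char_out, bpe_out, wp_out = ('hello', 0.91), ('he#llo', 0.72), ('hell', 0.85)
best_str, best_score = max([char_out, bpe_out, wp_out], key=lambda pair: pair[1])
print(best_str, best_score)  # -> hello 0.91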
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __lowercase : Dict = logging.get_logger(__name__) __lowercase : Optional[Any] = { 'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json', # See all umt5 models at https://huggingface.co/models?filter=umt5 } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "umt5" A_ = ["past_key_values"] def __init__( self , __a=25_0112 , __a=512 , __a=64 , __a=1024 , __a=8 , __a=None , __a=6 , __a=32 , __a=128 , __a=0.1 , __a=1E-6 , __a=1.0 , __a="gated-gelu" , __a=True , __a=True , __a="T5Tokenizer" , __a=True , __a=0 , __a=1 , __a=0 , **__a , ): '''simple docstring''' super().__init__( is_encoder_decoder=__a , tokenizer_class=__a , tie_word_embeddings=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , ) __a : Any = vocab_size __a : Any = d_model __a : str = d_kv __a : Dict = d_ff __a : Union[str, Any] = num_layers __a : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __a : Optional[int] = num_heads __a : Tuple = relative_attention_num_buckets __a : Optional[Any] = relative_attention_max_distance __a : Optional[int] = dropout_rate __a : List[Any] = layer_norm_epsilon __a : int = initializer_factor __a : Union[str, Any] = feed_forward_proj __a : Any = use_cache __a : List[Any] = self.feed_forward_proj.split('-' ) __a : Dict = act_info[-1] __a : Dict = act_info[0] == 'gated' if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) if feed_forward_proj == "gated-gelu": __a : Optional[int] = 'gelu_new' @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.d_model @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.num_heads @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.num_layers class __UpperCamelCase ( lowerCAmelCase_ ): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: __a : Dict = 'past_encoder_sequence + sequence' __a : Tuple = {0: 'batch'} __a : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __a : List[Any] = {0: 'batch', 1: 'decoder_sequence'} __a : int = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__a , direction='inputs' ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def __UpperCAmelCase ( self ): '''simple docstring''' return 13 @property def __UpperCAmelCase ( self ): '''simple docstring''' return 5E-4
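# Minimal sketch, assuming the class above matches transformers' UMT5Config:
# a 'gated-*' activation string is split into the gate flag and the dense
# activation, and 'gated-gelu' is remapped to the 'gelu_new' kernel.
from transformers import UMT5Config

config = UMT5Config(feed_forward_proj='gated-gelu')
print(config.dense_act_fn, config.is_gated_act)  # expected: gelu_new True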
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase : Optional[Any] = logging.get_logger(__name__) __lowercase : Tuple = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "markuplm" def __init__( self , __a=3_0522 , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=2 , __a=0.02 , __a=1E-1_2 , __a=0 , __a=0 , __a=2 , __a=256 , __a=1024 , __a=216 , __a=1001 , __a=32 , __a=50 , __a="absolute" , __a=True , __a=None , **__a , ): '''simple docstring''' super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , ) __a : List[str] = vocab_size __a : Optional[Any] = hidden_size __a : Optional[int] = num_hidden_layers __a : Optional[Any] = num_attention_heads __a : Optional[int] = hidden_act __a : Union[str, Any] = intermediate_size __a : str = hidden_dropout_prob __a : List[Any] = attention_probs_dropout_prob __a : Dict = max_position_embeddings __a : Optional[Any] = type_vocab_size __a : Union[str, Any] = initializer_range __a : Optional[Any] = layer_norm_eps __a : Optional[Any] = position_embedding_type __a : Optional[int] = use_cache __a : Optional[Any] = classifier_dropout # additional properties __a : int = max_depth __a : Union[str, Any] = max_xpath_tag_unit_embeddings __a : str = max_xpath_subs_unit_embeddings __a : Optional[Any] = tag_pad_id __a : Dict = subs_pad_id __a : List[str] = xpath_unit_hidden_size
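# Sketch, assuming the class above matches transformers' MarkupLMConfig: the
# xpath-specific fields size the DOM-tree embeddings layered on top of the
# BERT-style backbone.
from transformers import MarkupLMConfig

cfg = MarkupLMConfig()  # defaults mirror microsoft/markuplm-base
print(cfg.max_depth, cfg.xpath_unit_hidden_size)  # expected: 50 32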
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = 'https://www.worldometers.info/coronavirus'):
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covidaa_stats().items():
        print(f'''{key}\n{value}\n''')
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase : int = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Optional[int] = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys __lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
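# Independent sketch of the lazy-import pattern used above (not the actual
# transformers _LazyModule implementation): importing the package stays cheap
# because attribute access is what triggers the real submodule import.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                return getattr(module, attr)
        raise AttributeError(attr)

# As at the bottom of the file above, the module would then replace itself:
# sys.modules[__name__] = TinyLazyModule(__name__, _import_structure)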
'''simple docstring''' from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , 'embed_dim' ) ) self.parent.assertTrue(hasattr(__a , 'num_heads' ) ) class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ): '''simple docstring''' __a : str = parent __a : List[Any] = batch_size __a : Optional[int] = image_size __a : List[str] = patch_sizes __a : str = patch_stride __a : Any = patch_padding __a : Dict = is_training __a : Union[str, Any] = use_labels __a : Dict = num_labels __a : List[Any] = num_channels __a : Any = embed_dim __a : int = num_heads __a : Optional[int] = stride_kv __a : Dict = depth __a : List[str] = cls_token __a : List[Any] = attention_drop_rate __a : Tuple = initializer_range __a : int = layer_norm_eps def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Dict = None if self.use_labels: # create a random int32 tensor of given shape __a : str = ids_tensor([self.batch_size] , self.num_labels ) __a : str = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = TFCvtModel(config=__a ) __a : Dict = model(__a , training=__a ) __a : Any = (self.image_size, self.image_size) __a , __a : Dict = image_size[0], image_size[1] for i in range(len(self.depth ) ): __a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : List[Any] = self.num_labels __a : Optional[int] = TFCvtForImageClassification(__a ) __a : Dict = model(__a , labels=__a 
, training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : str = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A_ = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtModelTester(self ) __a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' 
, ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(__a ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) __a : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[Any] = [*signature.parameters.keys()] __a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' def check_hidden_states_output(__a , __a , __a ): __a : List[str] = model_class(__a ) __a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) ) __a : Any = outputs.hidden_states __a : Union[str, Any] = len(self.model_tester.depth ) self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[str] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Optional[Any] = True check_hidden_states_output(__a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = TFCvtModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __a : Tuple = self.default_image_processor __a : Any = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ) # forward pass __a : Any = model(**__a ) # verify the logits __a : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
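# The tests above can be run selectively with pytest, e.g. (illustrative path):
#   python -m pytest tests/models/cvt/test_modeling_tf_cvt.py -k "hidden_states" -v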
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowercase : Union[str, Any] = logging.get_logger(__name__) __lowercase : Tuple = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ): A_ = "bit" A_ = ["preactivation", "bottleneck"] A_ = ["SAME", "VALID"] def __init__( self , __a=3 , __a=64 , __a=[256, 512, 1024, 2048] , __a=[3, 4, 6, 3] , __a="preactivation" , __a="relu" , __a=None , __a=32 , __a=0.0 , __a=False , __a=32 , __a=1 , __a=None , __a=None , **__a , ): '''simple docstring''' super().__init__(**__a ) if layer_type not in self.layer_types: raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: __a : Tuple = global_padding.upper() else: raise ValueError(f"""Padding strategy {global_padding} not supported""" ) __a : str = num_channels __a : List[str] = embedding_size __a : str = hidden_sizes __a : Optional[int] = depths __a : Dict = layer_type __a : Union[str, Any] = hidden_act __a : int = global_padding __a : int = num_groups __a : str = drop_path_rate __a : Any = embedding_dynamic_padding __a : List[Any] = output_stride __a : Dict = width_factor __a : List[Any] = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(__a ) + 1 )] __a , __a : str = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names )
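# Sketch, assuming the class above matches transformers' BitConfig: padding is
# normalized to upper case, and backbone-style outputs are selected by stage
# name through the BackboneConfigMixin.
from transformers import BitConfig

cfg = BitConfig(layer_type='preactivation', global_padding='same', out_features=['stage1', 'stage4'])
print(cfg.out_features, cfg.out_indices)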
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __lowercase : Union[str, Any] = logging.get_logger(__name__) __lowercase : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __lowercase : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): for attribute in key.split('.' ): __a : Any = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if weight_type is not None: __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape else: __a : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Tuple = value elif weight_type == "weight_g": __a : str = value elif weight_type == "weight_v": __a : Optional[Any] = value elif weight_type == "bias": __a : Union[str, Any] = value else: __a : List[Any] = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : int = [] __a : List[str] = fairseq_model.state_dict() __a : Tuple = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __a : int = None for name, value in fairseq_dict.items(): __a : List[str] = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) __a : List[str] = True elif name.split('.' )[0] == "proj": __a : Tuple = fairseq_model.proj __a : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __a : List[Any] = True if "*" in mapped_key: __a : str = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] __a : int = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: __a : List[Any] = 'weight_g' elif "weight_v" in name: __a : List[Any] = 'weight_v' elif "bias" in name: __a : Optional[Any] = 'bias' elif "weight" in name: __a : Tuple = 'weight' else: __a : Optional[Any] = None set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) return proj_weight def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : List[str] = full_name.split('conv_layers.' )[-1] __a : Any = name.split('.' ) __a : List[str] = int(items[0] ) __a : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __a : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a , __a : List[str] = emb.weight.shape __a : str = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) __a : Optional[int] = emb.weight.data return lin_layer def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: __a : Union[str, Any] = f.readlines() __a : Tuple = [line.split(' ' )[0] for line in lines] __a : int = len(_SCREAMING_SNAKE_CASE ) __a : List[Any] = { '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, } vocab_dict.update(dict(zip(_SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , ): __a : Optional[int] = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : Any = SpeechaTextaConfig.from_pretrained( _SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE , decoder_layers=_SCREAMING_SNAKE_CASE , do_stable_layer_norm=_SCREAMING_SNAKE_CASE ) __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) __a , __a , __a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __a : Optional[int] = model[0].eval() # set weights for wav2vec2 encoder __a : Tuple = WavaVecaModel(_SCREAMING_SNAKE_CASE ) __a : int = recursively_load_weights_wavaveca(model.encoder , _SCREAMING_SNAKE_CASE ) __a : Dict = SpeechaTextaForCausalLM(_SCREAMING_SNAKE_CASE ) __a , __a : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_SCREAMING_SNAKE_CASE ) # set output linear layer unexpected_keys.remove('embed_out' ) __a : Optional[Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) __a : Tuple = SpeechEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE ) __a : int = False # add projection layer __a : str = nn.Parameter(projection_layer.weight ) __a : Any = nn.Parameter(projection_layer.bias ) __a : str = create_vocab_dict(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = SpeechaTextaTokenizer(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = hf_wavavec.config.to_dict() __a 
: Tuple = tokenizer.pad_token_id __a : Optional[int] = tokenizer.bos_token_id __a : Union[str, Any] = tokenizer.eos_token_id __a : Tuple = 'speech_to_text_2' __a : Tuple = 'wav2vec2' __a : List[str] = SpeechEncoderDecoderConfig.from_dict(_SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __lowercase : Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
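# Example invocation of the conversion script above, built from its argparse
# flags (script name and local paths are illustrative):
#   python convert_wav2vec2_seq2seq_original_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2 \
#       --vocab_size 10224 \
#       --num_decoder_layers 7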
'''simple docstring''' import argparse import os import re __lowercase : List[str] = 'src/transformers' # Pattern that looks at the indentation in a line. __lowercase : Any = re.compile(R'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. __lowercase : str = re.compile(R'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __lowercase : Any = re.compile(R'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. __lowercase : Tuple = re.compile(R'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __lowercase : Optional[Any] = re.compile(R'\[([^\]]+)\]') def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = _re_indent.search(_SCREAMING_SNAKE_CASE ) return "" if search is None else search.groups()[0] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any]="" , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ): __a : Optional[Any] = 0 __a : List[str] = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_SCREAMING_SNAKE_CASE ): index += 1 __a : Optional[int] = ['\n'.join(lines[:index] )] else: __a : Tuple = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). __a : Union[str, Any] = [lines[index]] index += 1 while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) ) if index < len(_SCREAMING_SNAKE_CASE ) - 1: __a : Dict = [lines[index + 1]] index += 1 else: __a : Union[str, Any] = [] else: blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) ) __a : Union[str, Any] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_SCREAMING_SNAKE_CASE ) > 0: blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): def _inner(_SCREAMING_SNAKE_CASE : List[Any] ): return key(_SCREAMING_SNAKE_CASE ).lower().replace('_' , '' ) return _inner def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str=None ): # If no key is provided, we use a noop. def noop(_SCREAMING_SNAKE_CASE : Dict ): return x if key is None: __a : Tuple = noop # Constants are all uppercase, they go first. __a : Tuple = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __a : str = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE )[0].isupper() and not key(_SCREAMING_SNAKE_CASE ).isupper()] # Functions begin with a lowercase, they go last. 
__a : int = [obj for obj in objects if not key(_SCREAMING_SNAKE_CASE )[0].isupper()] __a : Dict = ignore_underscore(_SCREAMING_SNAKE_CASE ) return sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): # This inner function sort imports between [ ]. def _replace(_SCREAMING_SNAKE_CASE : Optional[Any] ): __a : List[Any] = match.groups()[0] if "," not in imports: return F"""[{imports}]""" __a : List[Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __a : List[str] = keys[:-1] return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) + "]" __a : str = import_statement.split('\n' ) if len(_SCREAMING_SNAKE_CASE ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __a : Union[str, Any] = 2 if lines[1].strip() == '[' else 1 __a : Optional[Any] = [(i, _re_strip_line.search(_SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __a : Dict = sort_objects(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) __a : str = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_SCREAMING_SNAKE_CASE ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __a : Dict = _re_bracket_content.sub(_replace , lines[1] ) else: __a : Optional[int] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __a : Tuple = keys[:-1] __a : List[Any] = get_indent(lines[1] ) + ', '.join([F"""\"{k}\"""" for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) return "\n".join(_SCREAMING_SNAKE_CASE ) else: # Finally we have to deal with imports fitting on one line __a : Optional[int] = _re_bracket_content.sub(_replace , _SCREAMING_SNAKE_CASE ) return import_statement def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str=True ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Optional[Any] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __a : int = split_code_in_indented_blocks( _SCREAMING_SNAKE_CASE , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __a : str = main_blocks[block_idx] __a : List[str] = block.split('\n' ) # Get to the start of the imports. __a : str = 0 while line_idx < len(_SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __a : str = len(_SCREAMING_SNAKE_CASE ) else: line_idx += 1 if line_idx >= len(_SCREAMING_SNAKE_CASE ): continue # Ignore beginning and last line: they don't contain anything. 
__a : Any = '\n'.join(block_lines[line_idx:-1] ) __a : List[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __a : Tuple = split_code_in_indented_blocks(_SCREAMING_SNAKE_CASE , indent_level=_SCREAMING_SNAKE_CASE ) # We have two categories of import key: list or _import_structure[key].append/extend __a : Any = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __a : Dict = [(pattern.search(_SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(_SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __a : str = [(i, key) for i, key in enumerate(_SCREAMING_SNAKE_CASE ) if key is not None] __a : Tuple = [x[0] for x in sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __a : int = 0 __a : List[str] = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: __a : int = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_SCREAMING_SNAKE_CASE ) count += 1 # And we put our main block back together with its first and last line. __a : str = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_SCREAMING_SNAKE_CASE ): if check_only: return True else: print(F"""Overwriting {file}.""" ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_SCREAMING_SNAKE_CASE ) ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str=True ): __a : Optional[int] = [] for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ): if "__init__.py" in files: __a : int = sort_imports(os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' ) , check_only=_SCREAMING_SNAKE_CASE ) if result: __a : str = [os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' )] if len(_SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Would overwrite {len(_SCREAMING_SNAKE_CASE )} files, run `make style`.""" ) if __name__ == "__main__": __lowercase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __lowercase : List[Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
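# Example usage of the sorter above (illustrative file name):
#   python custom_init_isort.py --check_only   # raise if any __init__.py would change
#   python custom_init_isort.py                # rewrite offending __init__.py files in place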
'''simple docstring''' # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int ): with open(_SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*_SCREAMING_SNAKE_CASE ) finally: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __lowercase : Dict = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) __lowercase : Tuple = torch.device('cuda', local_rank) __lowercase : Optional[int] = socket.gethostname() __lowercase : List[str] = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __lowercase : str = dist.get_rank() __lowercase : Union[str, Any] = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
'''simple docstring''' import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = AudioLDMPipeline A_ = TEXT_TO_AUDIO_PARAMS A_ = TEXT_TO_AUDIO_BATCH_PARAMS A_ = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__a , ) __a : Dict = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) __a : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) __a : Any = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) __a : List[str] = ClapTextModelWithProjection(__a ) __a : Any = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77 ) __a : str = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__a , ) __a : Optional[int] = SpeechTaHifiGan(__a ) __a : Dict = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def __UpperCAmelCase ( self , __a , __a=0 ): '''simple docstring''' if str(__a ).startswith('mps' ): __a : List[str] = torch.manual_seed(__a ) else: __a : Union[str, Any] = torch.Generator(device=__a ).manual_seed(__a ) __a : int = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : Tuple = self.get_dummy_components() __a : List[str] = AudioLDMPipeline(**__a ) __a : int = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : List[Any] = self.get_dummy_inputs(__a ) __a : Any = audioldm_pipe(**__a ) __a : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert 
len(__a ) == 256 __a : Dict = audio[:10] __a : Union[str, Any] = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_dummy_components() __a : Optional[Any] = AudioLDMPipeline(**__a ) __a : Any = audioldm_pipe.to(__a ) __a : List[Any] = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Any = self.get_dummy_inputs(__a ) __a : Optional[int] = 3 * [inputs['prompt']] # forward __a : List[Any] = audioldm_pipe(**__a ) __a : str = output.audios[0] __a : Any = self.get_dummy_inputs(__a ) __a : Optional[Any] = 3 * [inputs.pop('prompt' )] __a : int = audioldm_pipe.tokenizer( __a , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='pt' , ) __a : str = text_inputs['input_ids'].to(__a ) __a : Optional[int] = audioldm_pipe.text_encoder( __a , ) __a : int = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state __a : Union[str, Any] = F.normalize(__a , dim=-1 ) __a : Optional[int] = prompt_embeds # forward __a : Union[str, Any] = audioldm_pipe(**__a ) __a : Optional[int] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_dummy_components() __a : Optional[int] = AudioLDMPipeline(**__a ) __a : Union[str, Any] = audioldm_pipe.to(__a ) __a : Union[str, Any] = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : str = self.get_dummy_inputs(__a ) __a : List[str] = 3 * ['this is a negative prompt'] __a : int = negative_prompt __a : List[Any] = 3 * [inputs['prompt']] # forward __a : Dict = audioldm_pipe(**__a ) __a : str = output.audios[0] __a : List[Any] = self.get_dummy_inputs(__a ) __a : List[str] = 3 * [inputs.pop('prompt' )] __a : int = [] for p in [prompt, negative_prompt]: __a : List[str] = audioldm_pipe.tokenizer( __a , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='pt' , ) __a : Dict = text_inputs['input_ids'].to(__a ) __a : List[Any] = audioldm_pipe.text_encoder( __a , ) __a : Tuple = text_embeds.text_embeds # additional L_2 normalization over each hidden-state __a : Optional[int] = F.normalize(__a , dim=-1 ) embeds.append(__a ) __a , __a : Union[str, Any] = embeds # forward __a : List[str] = audioldm_pipe(**__a ) __a : Optional[Any] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : Tuple = self.get_dummy_components() __a : Optional[int] = PNDMScheduler(skip_prk_steps=__a ) __a : List[Any] = AudioLDMPipeline(**__a ) __a : int = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : List[str] = self.get_dummy_inputs(__a ) __a : str = 'egg cracking' __a : Union[str, Any] = audioldm_pipe(**__a , negative_prompt=__a ) __a : List[str] = output.audios[0] assert audio.ndim == 1 assert len(__a ) == 256 __a : Tuple = audio[:10] __a : List[Any] = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : int = 
self.get_dummy_components() __a : Dict = PNDMScheduler(skip_prk_steps=__a ) __a : Any = AudioLDMPipeline(**__a ) __a : Dict = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : List[str] = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) __a : str = audioldm_pipe(__a , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts __a : Tuple = 2 __a : Optional[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt __a : Tuple = 2 __a : Dict = audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts __a : Optional[Any] = 2 __a : List[str] = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : int = self.get_dummy_components() __a : Optional[Any] = AudioLDMPipeline(**__a ) __a : Any = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Dict = audioldm_pipe.vocoder.config.sampling_rate __a : Union[str, Any] = self.get_dummy_inputs(__a ) __a : str = audioldm_pipe(audio_length_in_s=0.016 , **__a ) __a : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert len(__a ) / vocoder_sampling_rate == 0.016 __a : Tuple = audioldm_pipe(audio_length_in_s=0.032 , **__a ) __a : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert len(__a ) / vocoder_sampling_rate == 0.032 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_dummy_components() __a : int = AudioLDMPipeline(**__a ) __a : Optional[Any] = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Optional[Any] = ['hey'] __a : Optional[Any] = audioldm_pipe(__a , num_inference_steps=1 ) __a : int = output.audios.shape assert audio_shape == (1, 256) __a : str = audioldm_pipe.vocoder.config config.model_in_dim *= 2 __a : Any = SpeechTaHifiGan(__a ).to(__a ) __a : str = audioldm_pipe(__a , num_inference_steps=1 ) __a : int = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def __UpperCAmelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=__a ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a ) @slow class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self , __a , __a="cpu" , __a=torch.floataa , __a=0 ): '''simple docstring''' __a : Tuple = torch.Generator(device=__a ).manual_seed(__a ) __a : List[str] = np.random.RandomState(__a ).standard_normal((1, 8, 128, 16) ) __a : Tuple = 
torch.from_numpy(__a ).to(device=__a , dtype=__a ) __a : Optional[Any] = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) __a : int = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Optional[Any] = self.get_inputs(__a ) __a : Optional[Any] = 25 __a : List[Any] = audioldm_pipe(**__a ).audios[0] assert audio.ndim == 1 assert len(__a ) == 8_1920 __a : List[str] = audio[7_7230:7_7240] __a : str = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) __a : Dict = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) __a : Optional[int] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) __a : Optional[Any] = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Dict = self.get_inputs(__a ) __a : Tuple = audioldm_pipe(**__a ).audios[0] assert audio.ndim == 1 assert len(__a ) == 8_1920 __a : int = audio[2_7780:2_7790] __a : str = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) __a : Optional[Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
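# A minimal sketch, numpy and torch only, of the seeded-latents pattern used in
# the get_inputs helper above: drawing from a NumPy RandomState keeps the sample
# identical across devices, and the array is then moved onto torch.
# make_fixed_latents is a hypothetical helper name; the shape mirrors the test.
import numpy as np
import torch

def make_fixed_latents(seed=0, shape=(1, 8, 128, 16), dtype=torch.float32):
    latents = np.random.RandomState(seed).standard_normal(shape)
    return torch.from_numpy(latents).to(dtype=dtype)

# same seed, same tensor: the property the slow tests rely on
assert torch.equal(make_fixed_latents(0), make_fixed_latents(0))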
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase : str = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] __lowercase : Tuple = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] __lowercase : Dict = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): __lowercase : Optional[Any] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
1
'''simple docstring'''
import qiskit


def half_adder(bita: int, bitb: int):
    # classical half adder on a quantum simulator: qubit2 receives the XOR
    # (sum bit) of the inputs, qubit3 the AND (carry bit)
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'''Half Adder Output Qubit Counts: {counts}''')
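# A minimal sketch exercising half_adder over every input pair, assuming the
# same legacy qiskit API as the file above. The aer simulator is noiseless, so
# all shots land on one bitstring, ordered '<AND><XOR>' (clbit1 then clbit0).
for a in (0, 1):
    for b in (0, 1):
        result = half_adder(a, b)
        assert max(result, key=result.get) == f"{a & b}{a ^ b}"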
27
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowercase : Tuple = pytest.mark.integration __lowercase : Optional[int] = {'comet'} __lowercase : List[str] = importlib.util.find_spec('fairseq') is not None __lowercase : str = {'code_eval'} __lowercase : List[Any] = os.name == 'nt' __lowercase : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'} __lowercase : Optional[Any] = importlib.util.find_spec('transformers') is not None def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : int , _SCREAMING_SNAKE_CASE : List[Any] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (): __a : List[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @local class __UpperCamelCase ( parameterized.TestCase ): A_ = {} A_ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = '[...]' __a : Tuple = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) __a : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__a ) # check parameters __a : Dict = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__a , metric_module.__name__ ): with self.use_local_metrics(): try: __a : str = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = '[...]' __a : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) # run doctest with self.use_local_metrics(): __a : List[Any] = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) 
self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ): yield else: yield @contextmanager def __UpperCAmelCase ( self ): '''simple docstring''' def load_local_metric(__a , *__a , **__a ): return load_metric(os.path.join('metrics' , __a ) , *__a , **__a ) with patch('datasets.load_metric' ) as mock_load_metric: __a : Dict = load_local_metric yield @classmethod def __UpperCAmelCase ( cls , __a ): '''simple docstring''' def wrapper(__a ): __a : Optional[Any] = contextmanager(__a ) __a : str = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: __a : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): import torch def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Optional[int] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: __a : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Optional[int] ): class __UpperCamelCase : def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' assert len(__a ) == 2 __a : Dict = [0.19, 0.92] return scores, sum(__a ) / len(__a ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: __a : str = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: __a : int = load_from_checkpoint yield def lowerCamelCase (): __a : Optional[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) ) __a : List[str] = 'ERROR' __a : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
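# A minimal sketch, stdlib only, of the patch-as-context-manager idiom the
# fixtures above rely on: inside the with-block the named attribute is a
# MagicMock whose return_value we control; on exit the original is restored.
from unittest.mock import patch

import os.path

with patch('os.path.basename') as mock_basename:
    mock_basename.return_value = 'stub.txt'
    assert os.path.basename('/any/real/path.csv') == 'stub.txt'
# outside the block the real function is back
assert os.path.basename('/any/real/path.csv') == 'path.csv'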
27
1
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ): '''simple docstring''' __a : Dict = parent __a : Tuple = batch_size __a : str = seq_length __a : str = is_training __a : Any = use_input_mask __a : Dict = use_token_type_ids __a : List[Any] = use_labels __a : Tuple = vocab_size __a : Optional[int] = hidden_size __a : str = num_hidden_layers __a : Union[str, Any] = num_attention_heads __a : int = intermediate_size __a : Tuple = hidden_act __a : Union[str, Any] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : Dict = max_position_embeddings __a : str = type_vocab_size __a : List[Any] = type_sequence_label_size __a : Optional[int] = initializer_range __a : Tuple = num_labels __a : Any = num_choices __a : Any = scope def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Tuple = None if self.use_input_mask: __a : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __a : Optional[int] = None if self.use_token_type_ids: __a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[int] = None __a : Any = None __a : Dict = None if self.use_labels: __a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : int = ids_tensor([self.batch_size] , self.num_choices ) __a : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ): '''simple docstring''' return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[Any] = BioGptModel(config=__a ) model.to(__a ) model.eval() __a : int = model(__a , attention_mask=__a ) __a : Dict = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : str = 
BioGptForCausalLM(config=__a ) model.to(__a ) model.eval() __a : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a ): '''simple docstring''' __a : Optional[Any] = BioGptModel(config=__a ) model.to(__a ) model.eval() # create attention mask __a : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=__a ) __a : Tuple = self.seq_length // 2 __a : Union[str, Any] = 0 # first forward pass __a , __a : Union[str, Any] = model(__a , attention_mask=__a ).to_tuple() # create hypothetical next token and extent to next_input_ids __a : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __a : int = ids_tensor((1,) , __a ).item() + 1 __a : str = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __a : Tuple = random_other_next_tokens # append to next input_ids and attn_mask __a : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __a : Dict = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__a )] , dim=1 , ) # get two different outputs __a : Union[str, Any] = model(__a , attention_mask=__a )['last_hidden_state'] __a : List[Any] = model(__a , past_key_values=__a , attention_mask=__a )['last_hidden_state'] # select random slice __a : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a : List[str] = output_from_no_past[:, -1, random_slice_idx].detach() __a : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a ): '''simple docstring''' __a : Optional[int] = BioGptModel(config=__a ).to(__a ).eval() __a : str = torch.ones(input_ids.shape , dtype=torch.long , device=__a ) # first forward pass __a : List[Any] = model(__a , attention_mask=__a , use_cache=__a ) __a , __a : Union[str, Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __a : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a : str = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __a : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __a : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __a : Optional[Any] = model(__a , attention_mask=__a )['last_hidden_state'] __a : List[Any] = model(__a , attention_mask=__a , past_key_values=__a )[ 'last_hidden_state' ] # select random slice __a : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() __a : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a , __a=False ): '''simple docstring''' __a : List[Any] = BioGptForCausalLM(__a ) model.to(__a ) if gradient_checkpointing: model.gradient_checkpointing_enable() __a : Any = model(__a , labels=__a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def __UpperCAmelCase ( self , __a 
, *__a ): '''simple docstring''' __a : Union[str, Any] = BioGptModel(__a ) __a : str = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a ): '''simple docstring''' __a : List[str] = self.num_labels __a : Optional[int] = BioGptForTokenClassification(__a ) model.to(__a ) model.eval() __a : Tuple = model(__a , attention_mask=__a , token_type_ids=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = config_and_inputs __a : int = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) A_ = (BioGptForCausalLM,) if is_torch_available() else () A_ = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = BioGptModelTester(self ) __a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a : Union[str, Any] = type self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__a , gradient_checkpointing=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(__a ) __a : List[Any] = 
BioGptTokenizer.from_pretrained('microsoft/biogpt' ) __a : Optional[int] = 'left' # Define PAD Token = EOS Token = 50256 __a : Union[str, Any] = tokenizer.eos_token __a : Tuple = model.config.eos_token_id # use different length sentences to test batching __a : Optional[Any] = [ 'Hello, my dog is a little', 'Today, I', ] __a : Optional[int] = tokenizer(__a , return_tensors='pt' , padding=__a ) __a : Dict = inputs['input_ids'].to(__a ) __a : List[Any] = model.generate( input_ids=__a , attention_mask=inputs['attention_mask'].to(__a ) , ) __a : Tuple = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(__a ) __a : List[Any] = model.generate(input_ids=__a ) __a : List[str] = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() __a : Optional[Any] = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(__a ) __a : Any = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings ) __a : Tuple = tokenizer.batch_decode(__a , skip_special_tokens=__a ) __a : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a ) __a : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=__a ) __a : Optional[Any] = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(__a , __a ) self.assertListEqual(__a , [non_padded_sentence, padded_sentence] ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : int = BioGptModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __a : Tuple = 3 __a : Optional[int] = input_dict['input_ids'] __a : int = input_ids.ne(1 ).to(__a ) __a : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __a : Optional[int] = BioGptForSequenceClassification(__a ) model.to(__a ) model.eval() __a : Tuple = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : int = self.model_tester.prepare_config_and_inputs_for_common() __a : str = 3 __a : List[str] = 'multi_label_classification' __a : Optional[Any] = input_dict['input_ids'] __a : List[Any] = input_ids.ne(1 ).to(__a ) __a : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __a : Dict = BioGptForSequenceClassification(__a ) model.to(__a ) model.eval() __a : Tuple = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) __a : Optional[int] = torch.tensor([[2, 4805, 9, 656, 21]] ) __a : Dict = model(__a )[0] __a : int = 4_2384 __a : int = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __a ) __a : str = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = 
BioGptTokenizer.from_pretrained('microsoft/biogpt' ) __a : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(__a ) torch.manual_seed(0 ) __a : int = tokenizer('COVID-19 is' , return_tensors='pt' ).to(__a ) __a : Optional[int] = model.generate( **__a , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=__a , ) __a : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__a ) __a : List[Any] = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(__a , __a )
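# A minimal sketch of the left-padding setup the batched-generation test above
# depends on: decoder-only models must pad on the left so generation continues
# from real tokens. Assumes transformers is installed and network access to the
# Hub; gpt2 stands in for biogpt here.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
tok.padding_side = "left"
tok.pad_token = tok.eos_token  # gpt2 ships without a pad token
batch = tok(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
# the shorter prompt is padded on the left, so its attention mask starts with 0
assert batch["attention_mask"][1, 0].item() == 0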
27
'''simple docstring''' import re import string import numpy as np import datasets __lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __lowercase : Any = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def 
_compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
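# A minimal sketch, numpy only, of what the compute above reduces to when every
# normalization option is left off: element-wise string equality, scaled to a
# percentage. Values taken from the docstring example.
import numpy as np

preds = np.asarray(['cat?', 'theater', 'yelling', 'agent'])
refs = np.asarray(['the cat', 'theater', 'YELLING', 'agent007'])
assert (preds == refs).mean() * 100 == 25.0  # one of four matches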
27
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __lowercase : Optional[Any] = logging.get_logger(__name__) __lowercase : str = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] __lowercase : List[Any] = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): __a : Dict = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) return sd def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=rename_keys_prefix ): __a : Any = OrderedDict() __a : str = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __a : List[str] = key for name_pair in rename_keys_prefix: __a : List[str] = new_key.replace(name_pair[0] , name_pair[1] ) __a : Tuple = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __a : Tuple = new_d['cls.predictions.bias'] return new_d @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] ): assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: __a : Dict = 'pretraining' if "vcr" in checkpoint_path: __a : List[str] = {'visual_embedding_dim': 512} elif "vqa_advanced" in checkpoint_path: __a : Optional[Any] = {'visual_embedding_dim': 2_048} elif "vqa" in checkpoint_path: __a : Optional[Any] = {'visual_embedding_dim': 2_048} elif "nlvr" in checkpoint_path: __a : Optional[int] = {'visual_embedding_dim': 1_024} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: __a : Optional[int] = {'visual_embedding_dim': 512} __a : int = 'multichoice' elif "vqa_advanced" in checkpoint_path: __a : Optional[int] = {'visual_embedding_dim': 2_048} __a : Union[str, Any] = 'vqa_advanced' elif "vqa" in checkpoint_path: __a : Union[str, Any] = {'visual_embedding_dim': 2_048, 'num_labels': 3_129} __a : Optional[int] = 'vqa' elif "nlvr" in checkpoint_path: __a : Union[str, Any] = { 'visual_embedding_dim': 1_024, 'num_labels': 2, } __a : Any = 'nlvr' __a : Optional[int] = VisualBertConfig(**_SCREAMING_SNAKE_CASE ) # Load State Dict __a : List[Any] = load_state_dict(_SCREAMING_SNAKE_CASE ) __a : Tuple = get_new_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if model_type == "pretraining": __a : Union[str, Any] = VisualBertForPreTraining(_SCREAMING_SNAKE_CASE ) elif model_type == "vqa": __a : List[Any] = VisualBertForQuestionAnswering(_SCREAMING_SNAKE_CASE ) elif model_type == "nlvr": __a : List[str] = VisualBertForVisualReasoning(_SCREAMING_SNAKE_CASE ) elif model_type == "multichoice": 
__a : int = VisualBertForMultipleChoice(_SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Save Checkpoints Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') __lowercase : Any = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
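# A minimal sketch of driving the conversion script above from the shell; the
# script filename and paths are placeholders. The checkpoint filename must be
# one of ACCEPTABLE_CHECKPOINTS so the model type and config can be inferred.
#
#   python convert_checkpoint.py /checkpoints/vqa_fine_tuned.th ./visual_bert_vqa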
27
'''simple docstring'''
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
27
1
'''simple docstring'''
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
        raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
    if all_paths != sorted(all_paths):
        raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
27
'''simple docstring'''
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py']
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        '''simple docstring'''
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        '''simple docstring'''
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
27
1
'''simple docstring'''


def lowerCamelCase(number: int):
    # a non-negative integer passes this check iff clearing its lowest set bit
    # (number & (number - 1)) leaves zero, i.e. at most one bit is set; note
    # that 0 also passes
    if number < 0:
        raise ValueError('number must not be negative')
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
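# A minimal sketch of the bit trick above on a few values: a positive power of
# two has exactly one set bit, so the masked result is zero only then.
for n, expected in [(1, True), (2, True), (3, False), (8, True), (12, False)]:
    assert lowerCamelCase(n) == expected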
27
'''simple docstring'''
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / 'file.csv'
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / 'malformed_file.csv'
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / 'csv_with_image.csv'
    data = textwrap.dedent(
        f'''\
        image
        {image_file}
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / 'csv_with_label.csv'
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / 'csv_with_int_list.csv'
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match='Error tokenizing data'):
        for _ in generator:
            pass
    assert any(
        record.levelname == 'ERROR'
        and 'Failed to read file' in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding='utf-8') as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='utf-8', features=Features({'image': Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('image').type == Image()()
    generated_content = pa_table.to_pydict()['image']
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding='utf-8') as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='utf-8', features=Features({'label': ClassLabel(names=['good', 'bad'])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('label').type == ClassLabel(names=['good', 'bad'])()
    generated_content = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad']).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding='utf-8', sep=',', converters={'int_list': lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field('int_list').type)
    generated_content = pa_table.to_pydict()['int_list']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
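# A minimal sketch, pandas only, of the converter behavior the last test
# checks: read_csv applies the callable to each raw cell string before the
# column is built, which is how the space-separated ints become Python lists.
import io

import pandas as pd

df = pd.read_csv(
    io.StringIO('int_list\n1 2 3\n4 5 6\n'),
    converters={'int_list': lambda x: [int(i) for i in x.split()]},
)
assert df['int_list'].tolist() == [[1, 2, 3], [4, 5, 6]]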
27
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Union[str, Any] = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Union[str, Any] = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Union[str, Any] = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Union[str, Any] = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
1
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __lowercase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' super().__init__() self.register_modules( vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , ) def __UpperCAmelCase ( self , __a = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __a : str = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' self.enable_attention_slicing(__a ) @torch.no_grad() def __call__( self , __a , __a = 512 , __a = 512 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , __a = None , **__a , ): '''simple docstring''' if isinstance(__a , __a ): __a : int = 1 elif isinstance(__a , __a ): __a : Any = len(__a ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__a )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__a )}.""" ) # get prompt text embeddings __a : Optional[int] = self.tokenizer( __a , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) __a : Optional[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __a : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __a : Dict = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __a : Any = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __a , __a , __a : Tuple = text_embeddings.shape __a : Tuple = text_embeddings.repeat(1 , __a , 1 ) __a : Tuple = text_embeddings.view(bs_embed * num_images_per_prompt , __a , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
        __a : List[Any] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            __a : List[str]
            if negative_prompt is None:
                __a : Tuple = ['']
            elif type(__a ) is not type(__a ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !="""
                    f""" {type(__a )}.""" )
            elif isinstance(__a , __a ):
                __a : Union[str, Any] = [negative_prompt]
            elif batch_size != len(__a ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ' the batch size of `prompt`.' )
            else:
                __a : Tuple = negative_prompt

            __a : List[str] = text_input_ids.shape[-1]
            __a : Dict = self.tokenizer(
                __a , padding='max_length' , max_length=__a , truncation=__a , return_tensors='pt' , )
            __a : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            __a : List[Any] = uncond_embeddings.shape[1]
            __a : Union[str, Any] = uncond_embeddings.repeat(__a , __a , 1 )
            __a : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , __a , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            __a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        __a : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        __a : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        __a : List[Any] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                __a : Optional[int] = torch.randn(
                    __a , generator=__a , device='cpu' , dtype=__a ).to(self.device )
                __a : Dict = torch.randn(__a , generator=__a , device='cpu' , dtype=__a ).to(
                    self.device )
            else:
                __a : Optional[Any] = torch.randn(
                    __a , generator=__a , device=self.device , dtype=__a )
                __a : Any = torch.randn(__a , generator=__a , device=self.device , dtype=__a )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(
                    f"""Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}""" )
            __a : Tuple = latents_reference.to(self.device )
            __a : Dict = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        __a : Dict = (latents_shape[3] - latents_shape_reference[3]) // 2
        __a : Optional[Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
        __a : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        __a : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        __a : int = 0 if dx < 0 else dx
        __a : Dict = 0 if dy < 0 else dy
        __a : str = max(-dx , 0 )
        __a : Optional[int] = max(-dy , 0 )
        __a : int = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(__a )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        __a : List[str] = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        __a : List[str] = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        __a : Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        __a : Optional[int] = {}
        if accepts_eta:
            __a : int = eta

        for i, t in enumerate(self.progress_bar(__a ) ):
            # expand the latents if we are doing classifier free guidance
            __a : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __a : int = self.scheduler.scale_model_input(__a , __a )

            # predict the noise residual
            __a : List[str] = self.unet(__a , __a , encoder_hidden_states=__a ).sample

            # perform guidance
            if do_classifier_free_guidance:
                __a , __a : Any = noise_pred.chunk(2 )
                __a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            __a : int = self.scheduler.step(__a , __a , __a , **__a ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__a , __a , __a )

        # scale and decode the image latents with the VAE
        __a : List[Any] = 1 / 0.18215 * latents
        __a : List[Any] = self.vae.decode(__a ).sample
        __a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        __a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if self.safety_checker is not None:
            __a : Tuple = self.feature_extractor(self.numpy_to_pil(__a ) , return_tensors='pt' ).to(
                self.device )
            __a , __a : Optional[Any] = self.safety_checker(
                images=__a , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            __a : Optional[Any] = None

        if output_type == "pil":
            __a : str = self.numpy_to_pil(__a )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a )
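# Usage sketch (added): a minimal, hypothetical invocation of a pipeline exposing the
# `__call__` tail above. The loader call is real diffusers API, but the community-pipeline
# name and checkpoint are assumptions, since the class name is obfuscated in this row.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",  # assumed checkpoint
    custom_pipeline="seed_resize_stable_diffusion",  # assumed community pipeline
).to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe("a photo of an astronaut", height=512, width=768, generator=generator).images[0]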
27
'''simple docstring'''
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMvaConfig,
        TFLayoutLMvaForQuestionAnswering,
        TFLayoutLMvaForSequenceClassification,
        TFLayoutLMvaForTokenClassification,
        TFLayoutLMvaModel,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class __UpperCamelCase :
    def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ):
        '''simple docstring'''
        __a : Optional[Any] = parent
        __a : int = batch_size
        __a : Any = num_channels
        __a : Optional[int] = image_size
        __a : Dict = patch_size
        __a : int = is_training
        __a : Union[str, Any] = use_input_mask
        __a : Optional[int] = use_token_type_ids
        __a : Dict = use_labels
        __a : str = vocab_size
        __a : List[Any] = hidden_size
        __a : Union[str, Any] = num_hidden_layers
        __a : str = num_attention_heads
        __a : Union[str, Any] = intermediate_size
        __a : Any = hidden_act
        __a : List[str] = hidden_dropout_prob
        __a : List[str] = attention_probs_dropout_prob
        __a : List[Any] = max_position_embeddings
        __a : Tuple = type_vocab_size
        __a : Any = type_sequence_label_size
        __a : Optional[int] = initializer_range
        __a : Any = coordinate_size
        __a : List[Any] = shape_size
        __a : Optional[int] = num_labels
        __a : Dict = num_choices
        __a : Union[str, Any] = scope
        __a : Union[str, Any] = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __a : Optional[int] = text_seq_length
        __a : Any = (image_size // patch_size) ** 2 + 1
        __a : Dict = self.text_seq_length + self.image_seq_length

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )

        __a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __a : Any = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __a : List[Any] = bbox[i, j, 3]
                    __a : Tuple = bbox[i, j, 1]
                    __a : str = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __a : int = bbox[i, j, 2]
                    __a : Dict = bbox[i, j, 0]
                    __a : int = tmp_coordinate
        __a : Optional[int] = tf.constant(__a )

        __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __a : str = None
        if self.use_input_mask:
            __a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )

        __a : str = None
        if self.use_token_type_ids:
            __a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )

        __a : Optional[Any] = None
        __a : Optional[int] = None
        if self.use_labels:
            __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )

        __a : int = LayoutLMvaConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            initializer_range=self.initializer_range ,
            coordinate_size=self.coordinate_size ,
            shape_size=self.shape_size ,
            input_size=self.image_size ,
            patch_size=self.patch_size ,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        __a : Dict = TFLayoutLMvaModel(config=__a )

        # text + image
        __a : List[Any] = model(__a , pixel_values=__a , training=__a )
        __a : Any = model(
            __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , )
        __a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        __a : Any = model(__a , training=__a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        __a : str = model({'pixel_values': pixel_values} , training=__a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        __a : Any = self.num_labels
        __a : Dict = TFLayoutLMvaForSequenceClassification(config=__a )
        __a : List[str] = model(
            __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        __a : str = self.num_labels
        __a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a )
        __a : List[str] = model(
            __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        __a : List[Any] = 2
        __a : Any = TFLayoutLMvaForQuestionAnswering(config=__a )
        __a : Any = model(
            __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Dict = self.prepare_config_and_inputs()
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs
        __a : Any = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict


@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    A_ = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    A_ = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    A_ = False
    A_ = False
    A_ = False

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        return True

    def __UpperCAmelCase ( self , __a , __a , __a=False ):
        '''simple docstring'''
        __a : str = copy.deepcopy(__a )
        if model_class in get_values(__a ):
            __a : str = {
                k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(__a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(__a ):
                __a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__a ):
                __a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__a ):
                __a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__a ):
                __a : Union[str, Any] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )

        return inputs_dict

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Optional[Any] = TFLayoutLMvaModelTester(self )
        __a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = model_class(__a )
            if getattr(__a , 'hf_compute_loss' , __a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
                __a : str = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0]
                ]
                __a : Dict = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
                __a : Dict = prepared_for_class.pop('input_ids' )

                __a : Tuple = model(__a , **__a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

                # Test that model correctly compute the loss when we mask some positions
                __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
                __a : str = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __a : Union[str, Any] = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        __a : List[Any] = -100
                        __a : List[str] = tf.convert_to_tensor(__a )
                        __a : Any = model(__a , **__a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )

                # Test that model correctly compute the loss with a dict
                __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
                __a : str = model(__a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

                # Test that model correctly compute the loss with a tuple
                __a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )

                # Get keys that were added with the _prepare_for_class function
                __a : Dict = prepared_for_class.keys() - inputs_dict.keys()
                __a : Any = inspect.signature(model.call ).parameters
                __a : str = list(signature.keys() )

                # Create a dictionary holding the location of the tensors in the tuple
                __a : List[Any] = {0: 'input_ids'}
                for label_key in label_keys:
                    __a : List[Any] = signature_names.index(__a )
                    __a : Union[str, Any] = label_key
                __a : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __a : Union[str, Any] = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )

                for index, value in sorted_tuple_index_mapping:
                    __a : Optional[Any] = prepared_for_class[value]

                __a : str = tuple(__a )

                # Send to model
                __a : Tuple = model(tuple_input[:-1] )[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __a : Any = type
            self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            __a , __a , __a , __a , __a , __a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            __a , __a , __a , __a , __a , __a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            __a , __a , __a , __a , __a , __a , __a )

    @slow
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a )
            self.assertIsNotNone(__a )


def lowerCamelCase ():
    __a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    @cached_property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None

    @slow
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )

        __a : Tuple = self.default_image_processor
        __a : List[Any] = prepare_img()
        __a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values

        __a : Union[str, Any] = tf.constant([[1, 2]] )
        __a : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )

        # forward pass
        __a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a )

        # verify the logits
        __a : List[Any] = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , __a )

        __a : Optional[Any] = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
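# Standalone sketch (added) of the integration test above. Note that this corpus spells
# the v3 classes with a trailing `a` (e.g. `TFLayoutLMvaModel`); the upstream transformers
# names used here are the real ones. The local image path is an assumption.
import tensorflow as tf
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, TFLayoutLMv3Model

model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
processor = LayoutLMv3ImageProcessor(apply_ocr=False)
pixel_values = processor(images=Image.open("document.png"), return_tensors="tf").pixel_values
input_ids = tf.constant([[1, 2]])
bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # (1, 199, 768) per the test above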
27
1
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowercase : int = logging.get_logger(__name__)

__lowercase : Optional[Any] = {
    'andreasmadsen/efficient_mlm_m0.40': (
        'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
    ),
}


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "roberta-prelayernorm"

    def __init__( self , __a=5_0265 , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=2 , __a=0.02 , __a=1E-1_2 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , __a=None , **__a , ):
        '''simple docstring'''
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )

        __a : Optional[Any] = vocab_size
        __a : str = hidden_size
        __a : int = num_hidden_layers
        __a : Union[str, Any] = num_attention_heads
        __a : Any = hidden_act
        __a : Union[str, Any] = intermediate_size
        __a : int = hidden_dropout_prob
        __a : Optional[int] = attention_probs_dropout_prob
        __a : Any = max_position_embeddings
        __a : str = type_vocab_size
        __a : Tuple = initializer_range
        __a : Any = layer_norm_eps
        __a : List[str] = position_embedding_type
        __a : str = use_cache
        __a : str = classifier_dropout


class __UpperCamelCase ( lowerCAmelCase_ ):
    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            __a : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            __a : Any = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
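# Instantiation sketch (added); `RobertaPreLayerNormConfig` is the assumed upstream name
# of the obfuscated config class above (its `model_type` string suggests as much).
from transformers import RobertaPreLayerNormConfig

config = RobertaPreLayerNormConfig()  # defaults mirror the signature above
assert config.model_type == "roberta-prelayernorm"
assert config.hidden_size == 768 and config.num_hidden_layers == 12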
27
'''simple docstring'''
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
    __a : Any = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f"""{test_file} instead.""" )
    __a : Tuple = components[-1]
    if not test_fn.endswith('py' ):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith('test_modeling_' ):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )

    __a : List[str] = components[:-1] + [test_fn.replace('.py' , '' )]
    __a : Optional[Any] = '.'.join(_SCREAMING_SNAKE_CASE )

    return test_module_path


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : List[str] = get_module_path(_SCREAMING_SNAKE_CASE )
    __a : Dict = importlib.import_module(_SCREAMING_SNAKE_CASE )
    return test_module


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : List[str] = []
    __a : List[str] = get_test_module(_SCREAMING_SNAKE_CASE )
    for attr in dir(_SCREAMING_SNAKE_CASE ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : Any = []
    __a : str = get_test_module(_SCREAMING_SNAKE_CASE )
    for attr in dir(_SCREAMING_SNAKE_CASE ):
        __a : int = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'all_model_classes' , [] )
        if len(_SCREAMING_SNAKE_CASE ) > 0:
            test_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : Any = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
    __a : Tuple = test_class()
    if hasattr(_SCREAMING_SNAKE_CASE , 'setUp' ):
        test.setUp()

    __a : List[Any] = None
    if hasattr(_SCREAMING_SNAKE_CASE , 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            __a : List[str] = test.model_tester.__class__

    return model_tester


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : int = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] ):
    __a : List[Any] = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    __a : Any = []
    for test_class in test_classes:
        __a : Any = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE )
        if tester_class is not None:
            tester_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : int = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes}
    return test_tester_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
    __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE )
    __a : Optional[int] = {
        model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
    }
    return model_test_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
    __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE )
    __a : str = {
        model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
    }
    return model_to_tester_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return o
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return o.__name__
    elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
        return [to_json(_SCREAMING_SNAKE_CASE ) for x in o]
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()}
    else:
        return o
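# Usage sketch (added), under the assumption that the helpers above come from
# transformers' `utils/get_test_info.py` (the defs are obfuscated to `lowerCamelCase`,
# but the call sites keep the upstream names). Run from the repository root so that
# the `sys.path.append('.')` above takes effect; the module path is an assumption.
from utils.get_test_info import get_model_to_tester_mapping, get_test_classes

test_file = "tests/models/bert/test_modeling_bert.py"
print(get_test_classes(test_file))             # e.g. [BertModelTest, ...]
print(get_model_to_tester_mapping(test_file))  # {model class: [tester classes]}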
27
1
'''simple docstring''' from math import factorial def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # If either of the conditions are true, the function is being asked # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError('Please enter positive integers for n and k where n >= k' ) return factorial(_SCREAMING_SNAKE_CASE ) // (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) if __name__ == "__main__": print( 'The number of five-card hands possible from a standard', f'''fifty-two card deck is: {combinations(52, 5)}\n''', ) print( 'If a class of 40 students must be arranged into groups of', f'''4 for group projects, there are {combinations(40, 4)} ways''', 'to arrange them.\n', ) print( 'If 10 teams are competing in a Formula One race, there', f'''are {combinations(10, 3)} ways that first, second and''', 'third place can be awarded.', )
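# Sanity check (added) of the closed form n! / (k! * (n - k)!) computed above,
# using the standard-library equivalent.
from math import comb

assert comb(52, 5) == 2598960  # five-card hands
assert comb(40, 4) == 91390    # groups of 4 from a class of 40
assert comb(10, 3) == 120      # podium orderings counted as combinations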
27
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    A_ = StableDiffusionInpaintPipeline
    A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    A_ = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    A_ = frozenset([] )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : int = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=9 ,
            out_channels=4 ,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,
            cross_attention_dim=32 ,
            attention_head_dim=(2, 4) ,
            use_linear_projection=__a ,
        )
        __a : str = PNDMScheduler(skip_prk_steps=__a )
        torch.manual_seed(0 )
        __a : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,
            latent_channels=4 ,
            sample_size=128 ,
        )
        torch.manual_seed(0 )
        __a : List[str] = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-0_5 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1000 ,
            hidden_act='gelu' ,
            projection_dim=512 ,
        )
        __a : Dict = CLIPTextModel(__a )
        __a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        __a : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def __UpperCAmelCase ( self , __a , __a=0 ):
        '''simple docstring'''
        __a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        __a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : Tuple = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) )
        __a : Tuple = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(__a ).startswith('mps' ):
            __a : Any = torch.manual_seed(__a )
        else:
            __a : str = torch.Generator(device=__a ).manual_seed(__a )
        __a : Dict = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Dict = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __a : str = self.get_dummy_components()
        __a : Union[str, Any] = StableDiffusionInpaintPipeline(**__a )
        __a : List[Any] = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )

        __a : List[Any] = self.get_dummy_inputs(__a )
        __a : Dict = sd_pipe(**__a ).images
        __a : Optional[Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        __a : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        __a : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        __a : str = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )

        __a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting'
        __a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()

        __a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'

        __a : Tuple = torch.manual_seed(0 )
        __a : int = pipe(
            prompt=__a ,
            image=__a ,
            mask_image=__a ,
            generator=__a ,
            output_type='np' ,
        )
        __a : Dict = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9E-3

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        __a : int = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        __a : Any = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )

        __a : str = 'stabilityai/stable-diffusion-2-inpainting'
        __a : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , safety_checker=__a , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()

        __a : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'

        __a : int = torch.manual_seed(0 )
        __a : Optional[Any] = pipe(
            prompt=__a ,
            image=__a ,
            mask_image=__a ,
            generator=__a ,
            output_type='np' ,
        )
        __a : int = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        __a : str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        __a : List[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )

        __a : str = 'stabilityai/stable-diffusion-2-inpainting'
        __a : Any = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' )
        __a : str = StableDiffusionInpaintPipeline.from_pretrained(
            __a , safety_checker=__a , scheduler=__a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        __a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'

        __a : Tuple = torch.manual_seed(0 )
        __a : str = pipe(
            prompt=__a ,
            image=__a ,
            mask_image=__a ,
            generator=__a ,
            num_inference_steps=2 ,
            output_type='np' ,
        )

        __a : List[str] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
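# Sketch (added): outside the test harness, the slow tests above reduce to roughly this
# flow, with the same checkpoint and inputs.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
image = pipe(
    "Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]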
27
1
'''simple docstring'''
from collections.abc import Callable


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Callable[[float], float] , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ):
    __a : float = a
    __a : float = b
    if function(_SCREAMING_SNAKE_CASE ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(_SCREAMING_SNAKE_CASE ) == 0:
        return b
    elif (
        function(_SCREAMING_SNAKE_CASE ) * function(_SCREAMING_SNAKE_CASE ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        __a : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(_SCREAMING_SNAKE_CASE ) == 0:
                return mid
            elif function(_SCREAMING_SNAKE_CASE ) * function(_SCREAMING_SNAKE_CASE ) < 0:
                __a : List[Any] = mid
            else:
                __a : List[Any] = mid
            __a : Dict = start + (end - start) / 2.0
        return mid


def lowerCamelCase (_SCREAMING_SNAKE_CASE : float ):
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 10_00))

    import doctest

    doctest.testmod()
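# Note (added): the defs above are obfuscated, but the `__main__` block calls them by their
# presumed original names (`bisection`, `f`). Under that assumption, another quick check:
print(bisection(lambda x: x * x - 2, 1, 2))  # ~1.4142135, i.e. sqrt(2); f(1) < 0 < f(2)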
27
'''simple docstring'''
import requests


__lowercase : Tuple = ''  # <-- Put your OpenWeatherMap appid here!
__lowercase : Tuple = 'https://api.openweathermap.org/data/2.5/'


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Chicago" , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'weather' , params=locals() ).json()


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Kolkata, India" , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'forecast' , params=locals() ).json()


def lowerCamelCase (_SCREAMING_SNAKE_CASE : float = 5_5.6_8 , _SCREAMING_SNAKE_CASE : float = 1_2.5_7 , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'onecall' , params=locals() ).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        __lowercase : Dict = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
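# Example call (added), assuming a valid OpenWeatherMap APPID has been filled in above
# (the blank default will return an authentication error) and that the first helper's
# original name was `current_weather`, as the `__main__` block suggests.
from pprint import pprint

pprint(current_weather("Chicago")["weather"])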
27
1
'''simple docstring'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowercase : Dict = logging.get_logger(__name__)

__lowercase : int = {
    'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "altclip_text_model"

    def __init__( self , __a=25_0002 , __a=1024 , __a=24 , __a=16 , __a=4096 , __a="gelu" , __a=0.1 , __a=0.1 , __a=514 , __a=1 , __a=0.02 , __a=0.02 , __a=1E-0_5 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , __a=768 , **__a , ):
        '''simple docstring'''
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )

        __a : Dict = vocab_size
        __a : List[Any] = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : Dict = num_attention_heads
        __a : List[Any] = hidden_act
        __a : Tuple = intermediate_size
        __a : Optional[int] = hidden_dropout_prob
        __a : Dict = attention_probs_dropout_prob
        __a : Tuple = max_position_embeddings
        __a : Union[str, Any] = type_vocab_size
        __a : Tuple = initializer_range
        __a : List[str] = initializer_factor
        __a : Optional[Any] = layer_norm_eps
        __a : List[str] = position_embedding_type
        __a : int = use_cache
        __a : Any = project_dim


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "altclip_vision_model"

    def __init__( self , __a=768 , __a=3072 , __a=512 , __a=12 , __a=12 , __a=3 , __a=224 , __a=32 , __a="quick_gelu" , __a=1E-5 , __a=0.0 , __a=0.02 , __a=1.0 , **__a , ):
        '''simple docstring'''
        super().__init__(**__a )

        __a : str = hidden_size
        __a : Dict = intermediate_size
        __a : str = projection_dim
        __a : Optional[int] = num_hidden_layers
        __a : Optional[Any] = num_attention_heads
        __a : Dict = num_channels
        __a : Union[str, Any] = patch_size
        __a : List[Any] = image_size
        __a : Any = initializer_range
        __a : Tuple = initializer_factor
        __a : str = attention_dropout
        __a : Union[str, Any] = layer_norm_eps
        __a : Union[str, Any] = hidden_act

    @classmethod
    def __UpperCAmelCase ( cls , __a , **__a ):
        '''simple docstring'''
        cls._set_token_in_kwargs(__a )

        __a , __a : List[str] = cls.get_config_dict(__a , **__a )

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            __a : List[str] = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(__a , **__a )


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "altclip"
    A_ = True

    def __init__( self , __a=None , __a=None , __a=768 , __a=2.6592 , **__a ):
        '''simple docstring'''
        __a : str = kwargs.pop('text_config_dict' , __a )
        __a : List[str] = kwargs.pop('vision_config_dict' , __a )

        super().__init__(**__a )

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                __a : Dict = {}

            # This is the complete result when using `text_config_dict`.
            __a : Any = AltCLIPTextConfig(**__a ).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        __a : Tuple = (
                            f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
                            f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __a : List[Any] = (
                            f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
                            f"""value `text_config[\"{key}\"]` will be overriden."""
                        )
                    logger.warning(__a )

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )

        if vision_config_dict is not None:
            if vision_config is None:
                __a : Optional[int] = {}

            # This is the complete result when using `vision_config_dict`.
            __a : Union[str, Any] = AltCLIPVisionConfig(**__a ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                __a : Dict = {
                    str(__a ): value for key, value in _vision_config_dict['id2label'].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        __a : Optional[Any] = (
                            f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
                            f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __a : Any = (
                            f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
                            f"""The value `vision_config[\"{key}\"]` will be overriden."""
                        )
                    logger.warning(__a )

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )

        if text_config is None:
            __a : int = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )

        if vision_config is None:
            __a : Tuple = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )

        __a : int = AltCLIPTextConfig(**__a )
        __a : Union[str, Any] = AltCLIPVisionConfig(**__a )

        __a : Optional[int] = projection_dim
        __a : List[Any] = logit_scale_init_value
        __a : Any = 1.0

    @classmethod
    def __UpperCAmelCase ( cls , __a , __a , **__a ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Optional[int] = copy.deepcopy(self.__dict__ )
        __a : Optional[Any] = self.text_config.to_dict()
        __a : Optional[Any] = self.vision_config.to_dict()
        __a : Dict = self.__class__.model_type
        return output
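# Composition sketch (added), using the assumed upstream names (`AltCLIPTextConfig`,
# `AltCLIPVisionConfig`, `AltCLIPConfig`) of the three obfuscated classes above.
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
print(config.projection_dim)           # 768 by default
print(config.text_config.project_dim)  # the text projection dim set above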
27
'''simple docstring'''
import torch
from transformers import AutoModel


class __UpperCamelCase ( torch.nn.Module ):
    def __init__( self , __a="sayef/fsner-bert-base-uncased" ):
        '''simple docstring'''
        super(__a , self ).__init__()

        __a : Tuple = AutoModel.from_pretrained(__a , return_dict=__a )
        __a : int = torch.nn.CosineSimilarity(3 , 1E-0_8 )
        __a : Union[str, Any] = torch.nn.Softmax(dim=1 )

    def __UpperCAmelCase ( self , **__a ):
        '''simple docstring'''
        return self.bert(**__a ).last_hidden_state

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        return token_embeddings.sum(2 , keepdim=__a )

    def __UpperCAmelCase ( self , __a , __a , __a=1 ):
        '''simple docstring'''
        return self.softmax(T * self.cos(__a , __a ) )

    def __UpperCAmelCase ( self , __a , __a ):
        '''simple docstring'''
        __a : str = W_supports['sizes'].tolist()
        __a : Union[str, Any] = W_supports['start_token_id'].item()
        __a : Any = W_supports['end_token_id'].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        __a : Tuple = self.BERT(**__a )
        __a : str = self.BERT(**__a )

        __a : Any = None
        __a : Dict = None

        __a : Dict = W_supports['input_ids'] == start_token_id
        __a : Union[str, Any] = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(__a ):
            if i == 0:
                __a : Optional[int] = 0
            else:
                __a : Union[str, Any] = support_sizes[i - 1]

            __a : int = S[s : s + size][start_token_masks[s : s + size]]
            __a : Union[str, Any] = S[s : s + size][end_token_masks[s : s + size]]

            __a : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            __a : Dict = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )

            if p_starts is not None:
                __a : str = torch.vstack((p_starts, p_start) )
                __a : str = torch.vstack((p_ends, p_end) )
            else:
                __a : List[str] = p_start
                __a : int = p_end

        return p_starts, p_ends
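# Hypothetical sketch (added); `FSNERModel` and the `BERT` helper name are assumptions
# based on the sayef/fsner repository, since the definitions above are obfuscated.
import torch
from transformers import AutoTokenizer

model = FSNERModel()  # assumed name of the module defined above
tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
inputs = tokenizer("New York is a city.", return_tensors="pt")
embeddings = model.BERT(**inputs)  # last_hidden_state of the wrapped encoder
print(embeddings.shape)  # (1, seq_len, 768)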
27
1
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
    A_ = KandinskyImgaImgPipeline
    A_ = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    A_ = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    A_ = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    A_ = False

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return 32

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return 32

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return 100

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : Tuple = MCLIPConfig(
            numDims=self.cross_attention_dim ,
            transformerDimensions=self.text_embedder_hidden_size ,
            hidden_size=self.text_embedder_hidden_size ,
            intermediate_size=37 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            vocab_size=1005 ,
        )

        __a : Any = MultilingualCLIP(__a )
        __a : int = text_encoder.eval()

        return text_encoder

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : Dict = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        __a : str = UNetaDConditionModel(**__a )
        return model

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Any = self.dummy_text_encoder
        __a : Any = self.dummy_tokenizer
        __a : Union[str, Any] = self.dummy_unet
        __a : List[Any] = self.dummy_movq

        __a : Dict = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }

        __a : Dict = DDIMScheduler(**__a )

        __a : Optional[int] = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

        return components

    def __UpperCAmelCase ( self , __a , __a=0 ):
        '''simple docstring'''
        __a : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a ) ).to(__a )
        __a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__a )

        # create init_image
        __a : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
        __a : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : List[Any] = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((256, 256) )

        if str(__a ).startswith('mps' ):
            __a : str = torch.manual_seed(__a )
        else:
            __a : Union[str, Any] = torch.Generator(device=__a ).manual_seed(__a )
        __a : Optional[int] = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : int = 'cpu'

        __a : Any = self.get_dummy_components()

        __a : List[Any] = self.pipeline_class(**__a )
        __a : Optional[int] = pipe.to(__a )

        pipe.set_progress_bar_config(disable=__a )

        __a : Tuple = pipe(**self.get_dummy_inputs(__a ) )
        __a : int = output.images

        __a : int = pipe(
            **self.get_dummy_inputs(__a ) ,
            return_dict=__a ,
        )[0]

        __a : Optional[Any] = image[0, -3:, -3:, -1]
        __a : List[Any] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        __a : int = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""


@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Tuple = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy' )

        __a : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/cat.png' )
        __a : Optional[Any] = 'A red cartoon frog, 4k'

        __a : int = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(__a )

        __a : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
        __a : Optional[int] = pipeline.to(__a )

        pipeline.set_progress_bar_config(disable=__a )

        __a : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
        __a , __a : List[str] = pipe_prior(
            __a ,
            generator=__a ,
            num_inference_steps=5 ,
            negative_prompt='' ,
        ).to_tuple()

        __a : Union[str, Any] = pipeline(
            __a ,
            image=__a ,
            image_embeds=__a ,
            negative_image_embeds=__a ,
            generator=__a ,
            num_inference_steps=100 ,
            height=768 ,
            width=768 ,
            strength=0.2 ,
            output_type='np' ,
        )

        __a : Optional[int] = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(__a , __a )
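# Two-stage usage sketch (added), mirroring the slow test above. Upstream diffusers spells
# the classes `KandinskyPriorPipeline`/`KandinskyImg2ImgPipeline` (this corpus writes the
# digit 2 as `a`).
import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

prompt = "A red cartoon frog, 4k"
image_embeds, negative_image_embeds = prior(prompt, num_inference_steps=5).to_tuple()
cat = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
frog = pipe(
    prompt,
    image=cat,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.2,
).images[0]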
27
'''simple docstring'''
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
    __a : int = int(number**0.5 )
    return number == sq * sq


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
    __a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    __a : int = x_den * y_den * z_den
    __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    top //= hcf
    bottom //= hcf
    return top, bottom


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 35 ):
    __a : set = set()
    __a : int
    __a : Fraction = Fraction(0 )
    __a : tuple[int, int]

    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    __a : Union[str, Any] = x_num * y_den + x_den * y_num
                    __a : Optional[Any] = x_den * y_den
                    __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        __a : Any = add_three(
                            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=2
                    __a : Optional[int] = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    __a : Union[str, Any] = x_den * x_den * y_den * y_den
                    if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
                        __a : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            __a : List[Any] = add_three(
                                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                            unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=-1
                    __a : int = x_num * y_num
                    __a : Optional[Any] = x_den * y_num + x_num * y_den
                    __a : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        __a : Any = add_three(
                            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=-2 (the original comment repeated "n=2"; this branch handles the inverse-square case)
                    __a : List[Any] = x_num * x_num * y_num * y_num
                    __a : List[Any] = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
                        __a : Optional[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Union[str, Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            __a : List[str] = add_three(
                                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                            unique_s.add(_SCREAMING_SNAKE_CASE )

    for num, den in unique_s:
        total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f'''{solution() = }''')
27
1
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list ):
    def merge(_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right

        return list(_merge() )

    if len(_SCREAMING_SNAKE_CASE ) <= 1:
        return collection
    __a : int = len(_SCREAMING_SNAKE_CASE ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    __lowercase : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
    __lowercase : Optional[Any] = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
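# Expected behavior (added), assuming the obfuscated def maps back to `merge_sort`
# as the recursive call and the `__main__` block suggest:
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]  # O(n log n), stable
assert merge_sort([]) == []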
27
'''simple docstring'''
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Dict = ort.SessionOptions()
        __a : Dict = False
        return options

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        __a : int = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        __a : Dict = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )

        # using the PNDM scheduler by default
        __a : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,
            revision='onnx' ,
            safety_checker=__a ,
            feature_extractor=__a ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=__a )

        __a : Tuple = 'A red cat sitting on a park bench'

        __a : int = np.random.RandomState(0 )
        __a : Tuple = pipe(
            prompt=__a ,
            image=__a ,
            mask_image=__a ,
            strength=0.75 ,
            guidance_scale=7.5 ,
            num_inference_steps=15 ,
            generator=__a ,
            output_type='np' ,
        )

        __a : Tuple = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-2
27
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


__lowercase : Union[str, Any] = {
    'configuration_swiftformer': [
        'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SwiftFormerConfig',
        'SwiftFormerOnnxConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : Tuple = [
        'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwiftFormerForImageClassification',
        'SwiftFormerModel',
        'SwiftFormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    __lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
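# With the lazy module above registered (upstream `_import_structure` layout), end users
# import the names directly and pay the import cost only on first access. Sketch:
from transformers import SwiftFormerConfig, SwiftFormerModel  # resolved lazily

model = SwiftFormerModel(SwiftFormerConfig())  # randomly initialized; torch required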
27
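The SwiftFormer init above relies on transformers' internal `_LazyModule` to defer heavy imports until first use. The same behaviour can be sketched with a PEP 562 module-level `__getattr__`; this illustrates the pattern and is not the library's implementation (module and attribute names here are placeholders):

# hypothetical mypkg/__init__.py — resolve attributes lazily on first access (PEP 562)
import importlib

_import_structure = {
    "configuration_swiftformer": ["SwiftFormerConfig"],
    "modeling_swiftformer": ["SwiftFormerModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name not in _attr_to_module:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # Import the owning submodule only now, then pull the requested attribute from it.
    submodule = importlib.import_module(f".{_attr_to_module[name]}", __name__)
    return getattr(submodule, name)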
'''simple docstring''' import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowercase : Dict = 16 __lowercase : List[Any] = 32 def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return int(x / 2**20 ) class __UpperCamelCase : def __enter__( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero __a : Optional[int] = torch.cuda.memory_allocated() return self def __exit__( self , *__a ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() __a : Dict = torch.cuda.memory_allocated() __a : List[Any] = torch.cuda.max_memory_allocated() __a : Tuple = bamb(self.end - self.begin ) __a : Tuple = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def lowerCamelCase (_SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : str = "bert-base-cased" , _SCREAMING_SNAKE_CASE : int = 320 , _SCREAMING_SNAKE_CASE : int = 160 , ): __a : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : List[Any] = load_dataset( 'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} ) def tokenize_function(_SCREAMING_SNAKE_CASE : Tuple ): # max_length=None => use the model max length (it's actually the default) __a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __a : List[str] = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __a : Tuple = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_SCREAMING_SNAKE_CASE : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
__a : int = DataLoader( tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) __a : Tuple = DataLoader( tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Initialize accelerator __a : str = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __a : Dict = config['lr'] __a : str = int(config['num_epochs'] ) __a : Optional[int] = int(config['seed'] ) __a : Any = int(config['batch_size'] ) __a : List[str] = args.model_name_or_path set_seed(_SCREAMING_SNAKE_CASE ) __a , __a : int = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) # Instantiate optimizer __a : Optional[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __a : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) if accelerator.state.deepspeed_plugin is not None: __a : int = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __a : Union[str, Any] = 1 __a : Tuple = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __a : str = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , ) else: __a : List[Any] = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__a , __a , __a , __a , __a : Optional[Any] = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over __a : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly __a : Dict = 0 # Now we train the model __a : str = {} for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): __a : List[Any] = model(**_SCREAMING_SNAKE_CASE ) __a : str = outputs.loss __a : str = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) __a : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_SCREAMING_SNAKE_CASE , ) parser.add_argument( '--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_SCREAMING_SNAKE_CASE , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_SCREAMING_SNAKE_CASE , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of train epochs.' , ) __a : List[str] = parser.parse_args() __a : List[Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
27
1
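The training row above hinges on a `TorchTracemalloc` context manager for peak-GPU-memory accounting. A readable sketch of that helper, assuming the deprecated `reset_max_memory_allocated` is swapped for the current `reset_peak_memory_stats`:

import gc
import torch

def b2mb(num_bytes: int) -> int:
    # bytes -> whole mebibytes
    return int(num_bytes / 2**20)

class TorchTracemalloc:
    """Measure CUDA memory used and peaked inside a `with` block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)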
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class __UpperCamelCase ( lowerCAmelCase_ ): A_ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
27
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __lowercase : List[Any] = 'bart' __lowercase : Union[str, Any] = True @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): if LOAD_DENSE_INDEX: __a : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) __a : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) __a : Optional[int] = qar_model.eval() else: __a , __a : str = (None, None) if MODEL_TYPE == "bart": __a : Union[str, Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) __a : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) __a : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) __a : str = sas_model.eval() else: __a , __a : Tuple = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): if LOAD_DENSE_INDEX: __a : Optional[Any] = faiss.StandardGpuResources() __a : Dict = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train'] __a : int = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) __a : int = faiss.IndexFlatIP(128 ) __a : Any = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE ) wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU else: __a , __a : str = (None, None) __a : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : Dict = datasets.load_dataset('eli5' , name='LFQA_reddit' ) __a : Dict = elia['train_eli5'] __a : Optional[int] = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) ) __a : str = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_SCREAMING_SNAKE_CASE ) return (elia_train, eli5_train_q_index) __lowercase , __lowercase , __lowercase : Any = load_indexes() __lowercase , __lowercase , __lowercase , __lowercase : Dict = load_models() __lowercase , __lowercase : int = load_train_data() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str]=10 ): __a : Optional[int] = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a , __a : Union[str, Any] = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Any = [elia_train[int(_SCREAMING_SNAKE_CASE )] for i in I[0]] return nn_examples def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str="wiki40b" , _SCREAMING_SNAKE_CASE : List[str]="dense" , _SCREAMING_SNAKE_CASE : Any=10 ): if source == "none": __a , __a : Any = (' <P> '.join(['' for _ in range(11 )] ).strip(), []) else: if method == "dense": __a , __a : str = query_qa_dense_index( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a , __a : Union[str, Any] = query_es_index( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , ) __a : Dict = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] __a : Any = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None), } ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.9_5 , _SCREAMING_SNAKE_CASE : str=0.8 ): with torch.no_grad(): __a : Union[str, Any] = qa_sas_generate( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar __lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' __lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) __lowercase : Dict = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] __lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options') if demo_options: __lowercase : Any = st.sidebar.selectbox( '', action_list, index=3, ) __lowercase : Tuple = action_list.index(action_st) __lowercase : Tuple = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) __lowercase : List[Any] = show_type == 'Show full text of passages' else: __lowercase : int = 3 __lowercase : str = True __lowercase : Tuple = st.sidebar.checkbox('Retrieval options') if retrieval_options: __lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model 
which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) __lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) __lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: __lowercase : str = 'wiki40b' __lowercase : List[Any] = 'dense' __lowercase : Dict = 'beam' __lowercase : Optional[int] = 2 __lowercase : List[str] = 64 __lowercase : Tuple = 2_56 __lowercase : List[str] = None __lowercase : Tuple = None __lowercase : List[Any] = st.sidebar.checkbox('Generation options') if generate_options: __lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) __lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) __lowercase : Tuple = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) __lowercase : int = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": __lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __lowercase : Dict = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __lowercase : Union[str, Any] = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __lowercase : List[str] = None # start main text __lowercase : int = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] __lowercase : Optional[int] = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": __lowercase : Any = st.text_input('Enter your question here:', '') else: __lowercase : Any = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": __lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10) __lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10) __lowercase : Optional[int] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __lowercase : str = support_list[:10] __lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: __lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __lowercase , __lowercase : int = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): __lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) __lowercase : Any = res[1].strip() if sec_titles == "": __lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url) else: __lowercase : Union[str, Any] = sec_titles.split(' & ') __lowercase : str = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: __lowercase : str = find_nearest_training(question) __lowercase : Optional[int] = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) __lowercase : Any = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) __lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
27
1
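Dense retrieval in the ELI5 app above is FAISS max-inner-product search over question and passage embeddings. A self-contained sketch with random vectors (the dimension and corpus size are arbitrary):

import numpy as np
import faiss  # pip install faiss-cpu

dim = 128
passages = np.random.rand(1000, dim).astype("float32")
faiss.normalize_L2(passages)           # unit norm => inner product == cosine similarity

index = faiss.IndexFlatIP(dim)         # exact max-inner-product index
index.add(passages)

query = np.random.rand(1, dim).astype("float32")
faiss.normalize_L2(query)
scores, ids = index.search(query, 10)  # top-10 most similar passages
print(ids[0], scores[0])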
'''simple docstring''' from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class __UpperCamelCase ( nn.Module ): def __init__( self , __a = 16 , __a = 88 , __a = None , __a = 1 , __a = 0.0 , __a = 32 , __a = None , __a = False , __a = None , __a = None , __a = "geglu" , __a = None , ): '''simple docstring''' super().__init__() __a : List[Any] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__a , attention_head_dim=__a , in_channels=__a , num_layers=__a , dropout=__a , norm_num_groups=__a , cross_attention_dim=__a , attention_bias=__a , sample_size=__a , num_vector_embeds=__a , activation_fn=__a , num_embeds_ada_norm=__a , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference __a : List[Any] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` __a : Optional[int] = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` __a : List[Any] = [1, 0] def __UpperCAmelCase ( self , __a , __a , __a=None , __a=None , __a=None , __a = True , ): '''simple docstring''' __a : Optional[int] = hidden_states __a : Dict = [] __a : Union[str, Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens __a : str = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] __a : List[str] = self.transformer_index_for_condition[i] __a : List[str] = self.transformers[transformer_index]( __a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , return_dict=__a , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] __a : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) __a : Tuple = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__a )
27
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowercase : Tuple = logging.get_logger(__name__) __lowercase : List[Any] = torch.device('cpu') def lowerCamelCase (): __a : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' __a : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : int = dct.pop(_SCREAMING_SNAKE_CASE ) __a : Tuple = val def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : Dict = [] for k in state_dict.keys(): __a : List[Any] = k if ".pwconv" in k: __a : List[Any] = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: __a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: __a : Optional[int] = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: __a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: __a : Union[str, Any] = k_new.split('.' ) if ls[2].isdigit(): __a : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: __a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : Union[str, Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __a : List[str] = 1_000 __a : Tuple = 'huggingface/label-files' __a : str = 'imagenet-1k-id2label.json' __a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : Any = idalabel __a : str = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __a : Dict = [3, 3, 6, 4] __a : int = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __a : Dict = [3, 3, 9, 6] __a : List[str] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __a : Dict = [4, 3, 10, 5] __a : Optional[int] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __a : Tuple = [4, 4, 12, 6] __a : Dict = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): __a : List[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE ) else: __a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) __a : Optional[Any] = checkpoint __a : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model __a : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs __a : Tuple = prepare_img() __a : str = ViTImageProcessor.from_pretrained('preprocessor_config' ) __a : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models __a : List[Any] = get_expected_output(_SCREAMING_SNAKE_CASE ) __a : Dict = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') __lowercase : Tuple = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
27
1
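The SwiftFormer conversion script above is, at its core, an ordered list of (old_key, new_key) renames applied to a checkpoint's state dict. A minimal sketch of that pattern (the keys here are illustrative, not a full mapping):

import torch

def rename_key(state_dict: dict, old: str, new: str) -> None:
    # Move the tensor to the new key, dropping the old one.
    state_dict[new] = state_dict.pop(old)

state_dict = {"patch_embed.proj.weight": torch.zeros(48, 3, 3, 3)}
rename_keys = [("patch_embed.proj.weight",
                "swiftformer.patch_embed.patch_embedding.projection.weight")]
for src, dest in rename_keys:
    rename_key(state_dict, src, dest)
print(list(state_dict))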
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : Tuple = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): __a , __a : Dict = emb.weight.shape __a : Optional[Any] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) __a : Tuple = emb.weight.data return lin_layer def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]="facebook/mbart-large-en-ro" , _SCREAMING_SNAKE_CASE : int=False , _SCREAMING_SNAKE_CASE : Tuple=False ): __a : Tuple = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) __a : Optional[Any] = state_dict['encoder.embed_tokens.weight'].shape[0] __a : int = MBartConfig.from_pretrained(_SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE ) if mbart_aa and finetuned: __a : Union[str, Any] = 'relu' __a : Tuple = state_dict['decoder.embed_tokens.weight'] __a : Union[str, Any] = MBartForConditionalGeneration(_SCREAMING_SNAKE_CASE ) model.model.load_state_dict(_SCREAMING_SNAKE_CASE ) if finetuned: __a : int = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') __lowercase : Any = parser.parse_args() __lowercase : int = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
27
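A readable version of the `make_linear_from_emb` helper in the mBART row above: it builds a bias-free output projection whose weight is tied to the embedding matrix, so output logits share parameters with the input embeddings.

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # weight tying
    return lin_layer

emb = nn.Embedding(10, 4)
logits = make_linear_from_emb(emb)(torch.randn(2, 4))
assert logits.shape == (2, 10)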
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __lowercase : Dict = logging.get_logger(__name__) __lowercase : Optional[Any] = { 'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json', # See all umt5 models at https://huggingface.co/models?filter=umt5 } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "umt5" A_ = ["past_key_values"] def __init__( self , __a=25_0112 , __a=512 , __a=64 , __a=1024 , __a=8 , __a=None , __a=6 , __a=32 , __a=128 , __a=0.1 , __a=1E-6 , __a=1.0 , __a="gated-gelu" , __a=True , __a=True , __a="T5Tokenizer" , __a=True , __a=0 , __a=1 , __a=0 , **__a , ): '''simple docstring''' super().__init__( is_encoder_decoder=__a , tokenizer_class=__a , tie_word_embeddings=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , ) __a : Any = vocab_size __a : Any = d_model __a : str = d_kv __a : Dict = d_ff __a : Union[str, Any] = num_layers __a : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __a : Optional[int] = num_heads __a : Tuple = relative_attention_num_buckets __a : Optional[Any] = relative_attention_max_distance __a : Optional[int] = dropout_rate __a : List[Any] = layer_norm_epsilon __a : int = initializer_factor __a : Union[str, Any] = feed_forward_proj __a : Any = use_cache __a : List[Any] = self.feed_forward_proj.split('-' ) __a : Dict = act_info[-1] __a : Dict = act_info[0] == 'gated' if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) if feed_forward_proj == "gated-gelu": __a : Optional[int] = 'gelu_new' @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.d_model @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.num_heads @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.num_layers class __UpperCamelCase ( lowerCAmelCase_ ): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: __a : Dict = 'past_encoder_sequence + sequence' __a : Tuple = {0: 'batch'} __a : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __a : List[Any] = {0: 'batch', 1: 'decoder_sequence'} __a : int = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__a , direction='inputs' ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def __UpperCAmelCase ( self ): '''simple docstring''' return 13 @property def __UpperCAmelCase ( self ): '''simple docstring''' return 5E-4
27
1
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : List[Any] = VideoMAEConfig() set_architecture_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if "finetuned" not in model_name: __a : List[Any] = False if "finetuned" in model_name: __a : Tuple = 'huggingface/label-files' if "kinetics" in model_name: __a : List[str] = 400 __a : Optional[int] = 'kinetics400-id2label.json' elif "ssv2" in model_name: __a : Tuple = 174 __a : Optional[int] = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) __a : List[str] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : List[Any] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : List[str] = idalabel __a : Optional[Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] ): if "small" in model_name: __a : Any = 384 __a : List[str] = 1_536 __a : Optional[Any] = 12 __a : List[Any] = 16 __a : Optional[int] = 12 __a : List[Any] = 3 __a : int = 192 __a : Dict = 768 elif "large" in model_name: __a : Optional[Any] = 1_024 __a : List[Any] = 4_096 __a : List[Any] = 24 __a : Any = 16 __a : Dict = 12 __a : List[Any] = 8 __a : Optional[int] = 512 __a : int = 2_048 elif "huge" in model_name: __a : Union[str, Any] = 1_280 __a : Optional[int] = 5_120 __a : Tuple = 32 __a : Union[str, Any] = 16 __a : Any = 12 __a : str = 8 __a : List[str] = 640 __a : Dict = 2_560 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): if "encoder." in name: __a : List[Any] = name.replace('encoder.' 
, '' ) if "cls_token" in name: __a : str = name.replace('cls_token' , 'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: __a : Optional[int] = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: __a : str = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: __a : Optional[int] = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __a : Dict = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' ) if "decoder.blocks" in name: __a : Tuple = name.replace('decoder.blocks' , 'decoder.decoder_layers' ) if "blocks" in name: __a : Any = name.replace('blocks' , 'videomae.encoder.layer' ) if "attn.proj" in name: __a : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "bias" not in name: __a : List[Any] = name.replace('attn' , 'attention.self' ) if "attn" in name: __a : Optional[Any] = name.replace('attn' , 'attention.attention' ) if "norm1" in name: __a : Union[str, Any] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __a : List[Any] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __a : List[str] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __a : Union[str, Any] = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: __a : str = name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: __a : str = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: __a : List[Any] = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __a : List[str] = name.replace('norm.weight' , 'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __a : Dict = name.replace('norm.bias' , 'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: __a : str = name.replace('head' , 'classifier' ) return name def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): for key in orig_state_dict.copy().keys(): __a : List[Any] = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key.startswith('encoder.' ): __a : str = key.replace('encoder.' , '' ) if "qkv" in key: __a : Tuple = key.split('.' ) if key.startswith('decoder.blocks' ): __a : Optional[Any] = config.decoder_hidden_size __a : Optional[int] = int(key_split[2] ) __a : Union[str, Any] = 'decoder.decoder_layers.' if "weight" in key: __a : List[Any] = val[:dim, :] __a : Optional[Any] = val[dim : dim * 2, :] __a : Union[str, Any] = val[-dim:, :] else: __a : Dict = config.hidden_size __a : Any = int(key_split[1] ) __a : Optional[int] = 'videomae.encoder.layer.' 
if "weight" in key: __a : List[Any] = val[:dim, :] __a : List[Any] = val[dim : dim * 2, :] __a : Any = val[-dim:, :] else: __a : Optional[Any] = val return orig_state_dict def lowerCamelCase (): __a : int = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) __a : Any = np.load(_SCREAMING_SNAKE_CASE ) return list(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : int = get_videomae_config(_SCREAMING_SNAKE_CASE ) if "finetuned" in model_name: __a : str = VideoMAEForVideoClassification(_SCREAMING_SNAKE_CASE ) else: __a : List[Any] = VideoMAEForPreTraining(_SCREAMING_SNAKE_CASE ) # download original checkpoint, hosted on Google Drive __a : Optional[int] = 'pytorch_model.bin' gdown.cached_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE ) __a : Optional[int] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) if "model" in files: __a : List[Any] = files['model'] else: __a : int = files['module'] __a : str = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.eval() # verify model on basic input __a : Optional[Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __a : Tuple = prepare_video() __a : Any = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ) if "finetuned" not in model_name: __a : Any = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) __a : Tuple = torch.load(_SCREAMING_SNAKE_CASE ) __a : str = model(**_SCREAMING_SNAKE_CASE ) __a : Optional[Any] = outputs.logits __a : Optional[Any] = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __a : Any = torch.Size([1, 400] ) __a : Optional[Any] = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] ) elif model_name == "videomae-small-finetuned-ssv2": __a : Optional[int] = torch.Size([1, 174] ) __a : Optional[Any] = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] ) elif model_name == "videomae-base": __a : int = torch.Size([1, 1_408, 1_536] ) __a : Optional[int] = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] ) elif model_name == "videomae-base-short": __a : Tuple = torch.Size([1, 1_408, 1_536] ) __a : Optional[Any] = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ) # we verified the loss both for normalized and unnormalized targets for this one __a : str = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] ) elif model_name == "videomae-large": __a : Optional[Any] = torch.Size([1, 1_408, 
1_536] ) __a : str = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] ) elif model_name == "videomae-large-finetuned-kinetics": __a : List[str] = torch.Size([1, 400] ) __a : Optional[int] = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] ) elif model_name == "videomae-huge-finetuned-kinetics": __a : Optional[Any] = torch.Size([1, 400] ) __a : List[str] = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] ) elif model_name == "videomae-base-short-finetuned-kinetics": __a : Optional[int] = torch.Size([1, 400] ) __a : List[Any] = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] ) elif model_name == "videomae-base-finetuned-kinetics": __a : str = torch.Size([1, 400] ) __a : Any = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ) elif model_name == "videomae-base-short-ssv2": __a : Any = torch.Size([1, 1_408, 1_536] ) __a : int = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __a : str = torch.Size([1, 174] ) __a : Any = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] ) elif model_name == "videomae-base-ssv2": __a : int = torch.Size([1, 1_408, 1_536] ) __a : int = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] ) elif model_name == "videomae-base-finetuned-ssv2": __a : str = torch.Size([1, 174] ) __a : Tuple = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] ) else: raise ValueError(F"""Model name not supported. Should be one of {model_names}""" ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) else: print('Logits:' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": __a : Tuple = outputs.loss assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print('Pushing to the hub...' ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='nielsr' ) if __name__ == "__main__": __lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4', type=str, help=( 'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct' ' download link.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default='/Users/nielsrogge/Documents/VideoMAE/Test', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowercase : Union[str, Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
27
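The VideoMAE converter above splits each fused `qkv` projection into separate query/key/value slices; the convention is simply thirds along the output dimension:

import torch

hidden = 768
qkv_weight = torch.randn(3 * hidden, hidden)  # fused [q; k; v] projection

query = qkv_weight[:hidden, :]
key = qkv_weight[hidden : 2 * hidden, :]
value = qkv_weight[-hidden:, :]
assert query.shape == key.shape == value.shape == (hidden, hidden)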
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = 'https://www.worldometers.info/coronavirus') -> dict:
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covidaa_stats().items():
        print(f'''{key}\n{value}\n''')
27
1
'''simple docstring'''


def actual_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
27
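The row above recomputes the half power twice per recursion level; computing it once, or iterating over the bits of the exponent, keeps the multiply count at O(log b). An iterative sketch:

def power_iter(a: int, b: int) -> float:
    """Exponentiation by squaring over the bits of |b|."""
    negative = b < 0
    b = abs(b)
    result = 1
    while b:
        if b & 1:          # odd exponent: fold in the current base
            result *= a
        a *= a             # square the base
        b >>= 1
    return 1 / result if negative else result

assert power_iter(-2, -3) == -0.125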
'''simple docstring''' from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , 'embed_dim' ) ) self.parent.assertTrue(hasattr(__a , 'num_heads' ) ) class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ): '''simple docstring''' __a : str = parent __a : List[Any] = batch_size __a : Optional[int] = image_size __a : List[str] = patch_sizes __a : str = patch_stride __a : Any = patch_padding __a : Dict = is_training __a : Union[str, Any] = use_labels __a : Dict = num_labels __a : List[Any] = num_channels __a : Any = embed_dim __a : int = num_heads __a : Optional[int] = stride_kv __a : Dict = depth __a : List[str] = cls_token __a : List[Any] = attention_drop_rate __a : Tuple = initializer_range __a : int = layer_norm_eps def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Dict = None if self.use_labels: # create a random int32 tensor of given shape __a : str = ids_tensor([self.batch_size] , self.num_labels ) __a : str = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = TFCvtModel(config=__a ) __a : Dict = model(__a , training=__a ) __a : Any = (self.image_size, self.image_size) __a , __a : Dict = image_size[0], image_size[1] for i in range(len(self.depth ) ): __a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : List[Any] = self.num_labels __a : Optional[int] = TFCvtForImageClassification(__a ) __a : Dict = model(__a , labels=__a 
, training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : str = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A_ = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtModelTester(self ) __a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' 
, ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(__a ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) __a : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[Any] = [*signature.parameters.keys()] __a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' def check_hidden_states_output(__a , __a , __a ): __a : List[str] = model_class(__a ) __a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) ) __a : Any = outputs.hidden_states __a : Union[str, Any] = len(self.model_tester.depth ) self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[str] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Optional[Any] = True check_hidden_states_output(__a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = TFCvtModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __a : Tuple = self.default_image_processor __a : Any = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ) # forward pass __a : Any = model(**__a ) # verify the logits __a : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
27
1
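The TFCvt test row above flips Keras' global mixed-precision policy around a single `test_keras_fit` call and then restores it. A minimal sketch of that pattern, with the placeholder identifiers replaced by readable names; the try/finally guard is an addition so the reset happens even if the wrapped call raises:

import tensorflow as tf

def run_with_mixed_float16(fn):
    """Run `fn` under the mixed_float16 policy, then restore float32."""
    tf.keras.mixed_precision.set_global_policy(
        tf.keras.mixed_precision.Policy("mixed_float16")
    )
    try:
        return fn()
    finally:
        # Always restore the default policy so later tests are unaffected.
        tf.keras.mixed_precision.set_global_policy("float32")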
'''simple docstring''' import math def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : str = 0 __a : List[Any] = 0 while num > 0: __a : List[Any] = num % 8 __a : Dict = octal + (remainder * math.floor(math.pow(10 , _SCREAMING_SNAKE_CASE ) )) counter += 1 __a : List[Any] = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return F"""0o{int(_SCREAMING_SNAKE_CASE )}""" def lowerCamelCase (): print('\n2 in octal is:' ) print(decimal_to_octal(2 ) ) # = 2 print('\n8 in octal is:' ) print(decimal_to_octal(8 ) ) # = 10 print('\n65 in octal is:' ) print(decimal_to_octal(65 ) ) # = 101 print('\n216 in octal is:' ) print(decimal_to_octal(216 ) ) # = 330 print('\n512 in octal is:' ) print(decimal_to_octal(512 ) ) # = 1000 print('\n' ) if __name__ == "__main__": main()
27
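The decimal-to-octal row above is hard to follow because its placeholders hide the working variables; in particular, the exponent of `math.pow(10, ...)` is the digit counter, not the input number. A readable equivalent under that reading:

import math

def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Place the octal digit at the decimal position tracked by `counter`.
        octal += remainder * math.floor(math.pow(10, counter))
        counter += 1
        num = math.floor(num / 8)  # integer-divide by 8
    # int() drops the trailing '.0' that math.floor arithmetic can leave behind.
    return f"0o{int(octal)}"

assert decimal_to_octal(216) == "0o330"
assert decimal_to_octal(512) == "0o1000"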
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __lowercase : Union[str, Any] = logging.get_logger(__name__) __lowercase : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __lowercase : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): for attribute in key.split('.' ): __a : Any = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if weight_type is not None: __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape else: __a : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Tuple = value elif weight_type == "weight_g": __a : str = value elif weight_type == "weight_v": __a : Optional[Any] = value elif weight_type == "bias": __a : Union[str, Any] = value else: __a : List[Any] = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : int = [] __a : List[str] = fairseq_model.state_dict() __a : Tuple = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __a : int = None for name, value in fairseq_dict.items(): __a : List[str] = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) __a : List[str] = True elif name.split('.' )[0] == "proj": __a : Tuple = fairseq_model.proj __a : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __a : List[Any] = True if "*" in mapped_key: __a : str = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] __a : int = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: __a : List[Any] = 'weight_g' elif "weight_v" in name: __a : List[Any] = 'weight_v' elif "bias" in name: __a : Optional[Any] = 'bias' elif "weight" in name: __a : Tuple = 'weight' else: __a : Optional[Any] = None set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) return proj_weight def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : List[str] = full_name.split('conv_layers.' )[-1] __a : Any = name.split('.' ) __a : List[str] = int(items[0] ) __a : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __a : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a , __a : List[str] = emb.weight.shape __a : str = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) __a : Optional[int] = emb.weight.data return lin_layer def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: __a : Union[str, Any] = f.readlines() __a : Tuple = [line.split(' ' )[0] for line in lines] __a : int = len(_SCREAMING_SNAKE_CASE ) __a : List[Any] = { '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, } vocab_dict.update(dict(zip(_SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , ): __a : Optional[int] = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : Any = SpeechaTextaConfig.from_pretrained( _SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE , decoder_layers=_SCREAMING_SNAKE_CASE , do_stable_layer_norm=_SCREAMING_SNAKE_CASE ) __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) __a , __a , __a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __a : Optional[int] = model[0].eval() # set weights for wav2vec2 encoder __a : Tuple = WavaVecaModel(_SCREAMING_SNAKE_CASE ) __a : int = recursively_load_weights_wavaveca(model.encoder , _SCREAMING_SNAKE_CASE ) __a : Dict = SpeechaTextaForCausalLM(_SCREAMING_SNAKE_CASE ) __a , __a : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_SCREAMING_SNAKE_CASE ) # set output linear layer unexpected_keys.remove('embed_out' ) __a : Optional[Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) __a : Tuple = SpeechEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE ) __a : int = False # add projection layer __a : str = nn.Parameter(projection_layer.weight ) __a : Any = nn.Parameter(projection_layer.bias ) __a : str = create_vocab_dict(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = SpeechaTextaTokenizer(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = hf_wavavec.config.to_dict() __a 
: Tuple = tokenizer.pad_token_id __a : Optional[int] = tokenizer.bos_token_id __a : Union[str, Any] = tokenizer.eos_token_id __a : Tuple = 'speech_to_text_2' __a : Tuple = 'wav2vec2' __a : List[str] = SpeechEncoderDecoderConfig.from_dict(_SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __lowercase : Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
27
1
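The conversion script above maps fairseq parameter names to Hugging Face names through a MAPPING whose values may contain a `*` standing for the layer index, recovered from the characters just before the matched key. A standalone sketch of that substitution (the two MAPPING entries are taken from the script; the helper name is illustrative):

from typing import Optional

MAPPING = {
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
}

def map_fairseq_name(name: str) -> Optional[str]:
    """Return the HF parameter name for `name`, or None if unmapped."""
    for key, mapped_key in MAPPING.items():
        if key in name:
            if "*" in mapped_key:
                # The layer index sits just before the matched key,
                # e.g. "encoder.layers.3.self_attn.k_proj" -> "3".
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return None

assert map_fairseq_name("encoder.layers.3.self_attn.k_proj") == "encoder.layers.3.attention.k_proj"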
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants __lowercase : Union[str, Any] = Mapping[str, np.ndarray] __lowercase : Tuple = Mapping[str, Any] # Is a nested dict. __lowercase : Any = 0.01 @dataclasses.dataclass(frozen=lowerCAmelCase_ ) class __UpperCamelCase : A_ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. A_ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. A_ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. A_ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. A_ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions A_ = None # Optional remark about the protein. Included as a comment in output PDB # files A_ = None # Templates used to generate this protein (prediction-only) A_ = None # Chain corresponding to each parent A_ = None def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : List[str] = r'(\[[A-Z]+\]\n)' __a : List[str] = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0] __a : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] ) __a : List[str] = ["N", "CA", "C"] __a : Dict = None __a : Optional[int] = None __a : Optional[Any] = None for g in groups: if "[PRIMARY]" == g[0]: __a : str = g[1][0].strip() for i in range(len(_SCREAMING_SNAKE_CASE ) ): if seq[i] not in residue_constants.restypes: __a : Union[str, Any] = 'X' # FIXME: strings are immutable __a : List[Any] = np.array( [residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __a : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) ) __a : List[Any] = np.array(_SCREAMING_SNAKE_CASE ) __a : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(_SCREAMING_SNAKE_CASE ): __a : Dict = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __a : int = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) ) __a : str = np.zeros( ( len(_SCREAMING_SNAKE_CASE ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(_SCREAMING_SNAKE_CASE ): __a : Optional[Any] = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Protein , _SCREAMING_SNAKE_CASE : int = 0 ): __a : List[str] = [] __a : Any = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) __a : Any = prot.parents __a : Tuple = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __a : Tuple = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id] if parents is None or 
len(_SCREAMING_SNAKE_CASE ) == 0: __a : List[str] = ['N/A'] pdb_headers.append(F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}""" ) return pdb_headers def lowerCamelCase (_SCREAMING_SNAKE_CASE : Protein , _SCREAMING_SNAKE_CASE : str ): __a : List[str] = [] __a : List[str] = pdb_str.split('\n' ) __a : List[Any] = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) __a : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: __a : List[Any] = [] if prot.parents_chain_index is not None: __a : Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] ) parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE ) __a : Any = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): __a : str = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ['N/A'] ) parents_per_chain.append(_SCREAMING_SNAKE_CASE ) else: parents_per_chain.append(list(prot.parents ) ) else: __a : List[str] = [['N/A']] def make_parent_line(_SCREAMING_SNAKE_CASE : Sequence[str] ) -> str: return F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __a : int = 0 for i, l in enumerate(_SCREAMING_SNAKE_CASE ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(_SCREAMING_SNAKE_CASE ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(_SCREAMING_SNAKE_CASE ): __a : Optional[int] = parents_per_chain[chain_counter] else: __a : int = ['N/A'] out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) ) return "\n".join(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Protein ): __a : Union[str, Any] = residue_constants.restypes + ['X'] def res_atoa(_SCREAMING_SNAKE_CASE : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] , 'UNK' ) __a : Any = residue_constants.atom_types __a : List[str] = [] __a : str = prot.atom_mask __a : str = prot.aatype __a : str = prot.atom_positions __a : Union[str, Any] = prot.residue_index.astype(np.intaa ) __a : int = prot.b_factors __a : Tuple = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('Invalid aatypes.' ) __a : Tuple = get_pdb_headers(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: pdb_lines.extend(_SCREAMING_SNAKE_CASE ) __a : List[Any] = aatype.shape[0] __a : Any = 1 __a : List[Any] = 0 __a : Optional[Any] = string.ascii_uppercase __a : Any = None # Add all atom sites. for i in range(_SCREAMING_SNAKE_CASE ): __a : Optional[Any] = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __a : int = 'ATOM' __a : Tuple = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else F""" {atom_name}""" __a : Union[str, Any] = '' __a : Tuple = '' __a : str = 1.0_0 __a : Union[str, Any] = atom_name[0] # Protein supports only C, N, O, S, this works. __a : str = '' __a : List[Any] = 'A' if chain_index is not None: __a : Any = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
__a : int = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(_SCREAMING_SNAKE_CASE ) atom_index += 1 __a : Union[str, Any] = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __a : Tuple = True __a : Dict = chain_index[i + 1] if should_terminate: # Close the chain. __a : int = 'TER' __a : List[Any] = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(_SCREAMING_SNAKE_CASE ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) pdb_lines.append('END' ) pdb_lines.append('' ) return "\n".join(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Protein ): return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def lowerCamelCase (_SCREAMING_SNAKE_CASE : FeatureDict , _SCREAMING_SNAKE_CASE : ModelOutput , _SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , _SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[Sequence[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Sequence[int]] = None , ): return Protein( aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
27
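The protein row above assembles fixed-width PDB ATOM records with a columnar f-string ("every space matters"). A standalone sketch of that record layout; the field values are made up, and the exact run-lengths of the literal spaces are an assumption, since the flattened dump may have collapsed whitespace:

def format_atom_line(atom_index, name, res_name, chain_tag, res_index,
                     pos, occupancy, b_factor, element):
    """Format one fixed-width PDB ATOM record (alt_loc, insertion code,
    and charge left blank, as in the row above)."""
    # Atom names shorter than 4 characters get a leading space.
    name_field = name if len(name) == 4 else f" {name}"
    return (
        f"{'ATOM':<6}{atom_index:>5} {name_field:<4}{'':>1}"
        f"{res_name:>3} {chain_tag:>1}"
        f"{res_index:>4}{'':>1}   "
        f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
        f"{occupancy:>6.2f}{b_factor:>6.2f}          "
        f"{element:>2}{'':>2}"
    )

print(format_atom_line(1, "CA", "GLY", "A", 1, (11.104, 6.134, -6.504), 1.00, 0.00, "C"))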
'''simple docstring''' # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int ): with open(_SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*_SCREAMING_SNAKE_CASE ) finally: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __lowercase : Dict = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) __lowercase : Tuple = torch.device('cuda', local_rank) __lowercase : Optional[int] = socket.gethostname() __lowercase : List[str] = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __lowercase : str = dist.get_rank() __lowercase : Union[str, Any] = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
27
1
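The distributed diagnostic above serializes prints from many ranks by holding an exclusive advisory lock on a file while printing. A minimal sketch of that idiom; it is Unix-only (fcntl), and locking the script's own source file mirrors what the diagnostic appears to do:

import fcntl

def printflock(*args):
    """Print atomically across processes sharing this file (Unix-only)."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*args)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)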
'''simple docstring''' import unittest from transformers import DonutProcessor __lowercase : List[Any] = 'naver-clova-ix/donut-base' class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = DonutProcessor.from_pretrained(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = { 'name': 'John Doe', 'age': '99', 'city': 'Atlanta', 'state': 'GA', 'zip': '30301', 'phone': '123-4567', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}], } __a : Tuple = ( '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>' '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>' '<s_nicknames><s_nickname>Johnny</s_nickname>' '<sep/><s_nickname>JD</s_nickname></s_nicknames>' ) __a : List[Any] = self.processor.tokenajson(__a ) self.assertDictEqual(__a , __a )
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase : str = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] __lowercase : Tuple = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] __lowercase : Dict = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): __lowercase : Optional[Any] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
1
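The data2vec `__init__` row above defers heavy imports through transformers' internal `_LazyModule`. The core mechanism can be sketched with a module-level `__getattr__` (PEP 562, Python 3.7+); this illustrates the idea only and is not the transformers implementation:

# my_package/__init__.py — a minimal lazy-import sketch (PEP 562).
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    # The submodule is imported only on first attribute access,
    # keeping `import my_package` itself cheap.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")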
'''simple docstring''' from __future__ import annotations import math def lowerCamelCase (_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list ): if len(_SCREAMING_SNAKE_CASE ) != 2 or len(a[0] ) != 2 or len(_SCREAMING_SNAKE_CASE ) != 2 or len(b[0] ) != 2: raise Exception('Matrices are not 2x2' ) __a : Tuple = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def lowerCamelCase (_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list ): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) ) ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list ): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) ) ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : list ): if len(_SCREAMING_SNAKE_CASE ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('Odd matrices are not supported!' ) __a : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) __a : Dict = matrix_length // 2 __a : str = [[a[i][j] for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE )] __a : List[Any] = [ [a[i][j] for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ] __a : Dict = [[a[i][j] for j in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE )] __a : Dict = [[a[i][j] for j in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] return top_left, top_right, bot_left, bot_right def lowerCamelCase (_SCREAMING_SNAKE_CASE : list ): return len(_SCREAMING_SNAKE_CASE ), len(matrix[0] ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : list ): print('\n'.join(str(_SCREAMING_SNAKE_CASE ) for line in matrix ) ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list ): if matrix_dimensions(_SCREAMING_SNAKE_CASE ) == (2, 2): return default_matrix_multiplication(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a , __a , __a , __a : List[Any] = split_matrix(_SCREAMING_SNAKE_CASE ) __a , __a , __a , __a : Optional[int] = split_matrix(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = actual_strassen(_SCREAMING_SNAKE_CASE , matrix_subtraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __a : List[Any] = actual_strassen(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) __a : str = actual_strassen(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) __a : str = actual_strassen(_SCREAMING_SNAKE_CASE , matrix_subtraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __a : List[Any] = actual_strassen(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __a : Optional[int] = actual_strassen(matrix_subtraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __a : List[Any] = actual_strassen(matrix_subtraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __a : Optional[int] = matrix_addition(matrix_subtraction(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = 
matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Any = matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Tuple = matrix_subtraction(matrix_subtraction(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # construct the new matrix from our 4 quadrants __a : Tuple = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def lowerCamelCase (_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list ): if matrix_dimensions(_SCREAMING_SNAKE_CASE )[1] != matrix_dimensions(_SCREAMING_SNAKE_CASE )[0]: __a : str = ( 'Unable to multiply these matrices, please check the dimensions.\n' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(_SCREAMING_SNAKE_CASE ) __a : Any = matrix_dimensions(_SCREAMING_SNAKE_CASE ) __a : Tuple = matrix_dimensions(_SCREAMING_SNAKE_CASE ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __a : Dict = max(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = int(math.pow(2 , math.ceil(math.loga(_SCREAMING_SNAKE_CASE ) ) ) ) __a : Optional[int] = matrixa __a : Optional[int] = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , _SCREAMING_SNAKE_CASE ): if i < dimensiona[0]: for _ in range(dimensiona[1] , _SCREAMING_SNAKE_CASE ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , _SCREAMING_SNAKE_CASE ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __a : Dict = actual_strassen(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Removing the additional zeros for i in range(0 , _SCREAMING_SNAKE_CASE ): if i < dimensiona[0]: for _ in range(dimensiona[1] , _SCREAMING_SNAKE_CASE ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": __lowercase : Tuple = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] __lowercase : Dict = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
27
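The Strassen row above computes the seven classic products through placeholder-named helpers, which obscures the recurrence. A compact NumPy restatement of the same scheme, assuming square inputs whose size is a power of two (the row pads its inputs to reach that):

import numpy as np

def strassen(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Multiply square matrices whose size is a power of two."""
    n = a.shape[0]
    if n == 1:
        return a * b
    h = n // 2
    a11, a12, a21, a22 = a[:h, :h], a[:h, h:], a[h:, :h], a[h:, h:]
    b11, b12, b21, b22 = b[:h, :h], b[:h, h:], b[h:, :h], b[h:, h:]
    # Strassen's seven recursive products.
    m1 = strassen(a11 + a22, b11 + b22)
    m2 = strassen(a21 + a22, b11)
    m3 = strassen(a11, b12 - b22)
    m4 = strassen(a22, b21 - b11)
    m5 = strassen(a11 + a12, b22)
    m6 = strassen(a21 - a11, b11 + b12)
    m7 = strassen(a12 - a22, b21 + b22)
    top = np.hstack([m1 + m4 - m5 + m7, m3 + m5])
    bottom = np.hstack([m2 + m4, m1 - m2 + m3 + m6])
    return np.vstack([top, bottom])

rng = np.random.default_rng(0)
a, b = rng.integers(0, 10, (4, 4)), rng.integers(0, 10, (4, 4))
assert np.array_equal(strassen(a, b), a @ b)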
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowercase : Tuple = pytest.mark.integration __lowercase : Optional[int] = {'comet'} __lowercase : List[str] = importlib.util.find_spec('fairseq') is not None __lowercase : str = {'code_eval'} __lowercase : List[Any] = os.name == 'nt' __lowercase : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'} __lowercase : Optional[Any] = importlib.util.find_spec('transformers') is not None def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : int , _SCREAMING_SNAKE_CASE : List[Any] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (): __a : List[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @local class __UpperCamelCase ( parameterized.TestCase ): A_ = {} A_ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = '[...]' __a : Tuple = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) __a : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__a ) # check parameters __a : Dict = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__a , metric_module.__name__ ): with self.use_local_metrics(): try: __a : str = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = '[...]' __a : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) # run doctest with self.use_local_metrics(): __a : List[Any] = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) 
self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ): yield else: yield @contextmanager def __UpperCAmelCase ( self ): '''simple docstring''' def load_local_metric(__a , *__a , **__a ): return load_metric(os.path.join('metrics' , __a ) , *__a , **__a ) with patch('datasets.load_metric' ) as mock_load_metric: __a : Dict = load_local_metric yield @classmethod def __UpperCAmelCase ( cls , __a ): '''simple docstring''' def wrapper(__a ): __a : Optional[Any] = contextmanager(__a ) __a : str = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: __a : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): import torch def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Optional[int] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: __a : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Optional[int] ): class __UpperCamelCase : def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' assert len(__a ) == 2 __a : Dict = [0.19, 0.92] return scores, sum(__a ) / len(__a ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: __a : str = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: __a : int = load_from_checkpoint yield def lowerCamelCase (): __a : Optional[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) ) __a : List[str] = 'ERROR' __a : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
27
1
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 200 ): __a : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200] __a : int = [0] * (pence + 1) __a : List[str] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(2_00) == 7_36_82
27
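The row above is the classic unbounded-coin counting DP (Project Euler 31): iterating coins in the outer loop counts combinations rather than ordered sequences. The same code with the placeholders replaced by readable names:

def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # one way to make 0: use no coins
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]

assert solution(200) == 73682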
'''simple docstring''' import re import string import numpy as np import datasets __lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __lowercase : Any = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def 
__UpperCAmelCase ( self , __a , __a , __a=None , __a=False , __a=False , __a=False , ): '''simple docstring''' if regexes_to_ignore is not None: for s in regexes_to_ignore: __a : Tuple = np.array([re.sub(__a , '' , __a ) for x in predictions] ) __a : List[Any] = np.array([re.sub(__a , '' , __a ) for x in references] ) else: __a : int = np.asarray(__a ) __a : str = np.asarray(__a ) if ignore_case: __a : Dict = np.char.lower(__a ) __a : List[str] = np.char.lower(__a ) if ignore_punctuation: __a : Dict = string.punctuation.maketrans('' , '' , string.punctuation ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Dict = np.char.translate(__a , table=__a ) if ignore_numbers: __a : Optional[int] = string.digits.maketrans('' , '' , string.digits ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Optional[int] = np.char.translate(__a , table=__a ) __a : Any = predictions == references return {"exact_match": np.mean(__a ) * 100}
27
1
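The exact_match metric above normalizes both sides (regex removal, then case folding, then punctuation and digit stripping, in that order) before comparing. A plain-Python sketch of the same pipeline without the NumPy vectorization the metric uses:

import re
import string

def exact_match(predictions, references, regexes_to_ignore=None,
                ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
    """Percentage of predictions that exactly match their references."""
    def normalize(text):
        for pattern in regexes_to_ignore or []:
            text = re.sub(pattern, "", text)
        if ignore_case:
            text = text.lower()
        if ignore_punctuation:
            text = text.translate(str.maketrans("", "", string.punctuation))
        if ignore_numbers:
            text = text.translate(str.maketrans("", "", string.digits))
        return text
    matches = [normalize(p) == normalize(r) for p, r in zip(predictions, references)]
    return {"exact_match": 100.0 * sum(matches) / len(matches)}

assert exact_match(["cat?", "theater"], ["the cat", "theater"])["exact_match"] == 50.0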
'''simple docstring''' import warnings from functools import wraps from typing import Callable def lowerCamelCase (_SCREAMING_SNAKE_CASE : Callable ): @wraps(_SCREAMING_SNAKE_CASE ) def _inner_fn(*_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[str] ): warnings.warn( (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , _SCREAMING_SNAKE_CASE , ) return fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return _inner_fn
27
'''simple docstring''' import os import sys __lowercase : List[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowercase : int = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[Any] ): return AutoConfig.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoTokenizer.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any ): return AutoTokenizer.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModel.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Union[str, Any] ): return AutoModel.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[int] ): return AutoModelForCausalLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[Any] ): return AutoModelForMaskedLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Any ): return AutoModelForSequenceClassification.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : List[str] ): return AutoModelForQuestionAnswering.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
27
1
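The decorator row above wraps a callable so that every call emits an "experimental" warning; its warning category is hidden behind a placeholder, so `UserWarning` below is an assumption. A readable sketch:

import warnings
from functools import wraps
from typing import Callable

def experimental(fn: Callable) -> Callable:
    """Warn on each call that `fn` may change without notice."""
    @wraps(fn)  # preserve __name__, __doc__, etc. of the wrapped function
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to "
            "breaking changes in the future.",
            UserWarning,  # assumed category; the row's placeholder hides it
        )
        return fn(*args, **kwargs)
    return _inner_fn

@experimental
def new_api():
    return 42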
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __lowercase : str = logging.get_logger(__name__) __lowercase : List[Any] = '▁' __lowercase : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'} __lowercase : Any = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), } } __lowercase : List[Any] = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off __lowercase : List[Any] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class __UpperCamelCase ( lowerCAmelCase_ ): A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = ["input_ids", "attention_mask"] A_ = [] A_ = [] def __init__( self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a=None , __a=None , __a=None , __a = None , __a=None , **__a , ): '''simple docstring''' __a : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token __a : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , tokenizer_file=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) __a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__a ) ) __a : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __a : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __a : str = 1 __a : Any = len(self.sp_model ) __a : Dict = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a ) } __a : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()} __a : Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __a : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __a : List[str] = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __a : Dict = src_lang if src_lang is not None else 'en_XX' __a : int = self.lang_code_to_id[self._src_lang] __a : Optional[Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): '''simple docstring''' __a : int = self.__dict__.copy() __a : str = None __a : int = self.sp_model.serialized_model_proto() return state def __setstate__( self , __a ): '''simple docstring''' __a : Any = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : Tuple = {} __a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __UpperCAmelCase ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Optional[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCAmelCase ( self , __a , __a = None , __a = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) __a : int = [1] * len(self.prefix_tokens ) __a : List[Any] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__a )) + suffix_ones return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' __a : List[str] = [self.sep_token_id] __a : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCAmelCase ( self , __a , __a , __a , __a , **__a ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) __a : Dict = src_lang __a : List[Any] = self(__a , add_special_tokens=__a , return_tensors=__a , **__a ) __a : Any = self.convert_tokens_to_ids(__a ) __a : Any = tgt_lang_id return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.sp_model.encode(__a , out_type=__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __a : Dict = self.sp_model.PieceToId(__a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __UpperCAmelCase ( self , __a ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : List[str] = ''.join(__a 
).replace(__a , ' ' ).strip() return out_string def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' if not os.path.isdir(__a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __a : Dict = os.path.join( __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __a ) elif not os.path.isfile(self.vocab_file ): with open(__a , 'wb' ) as fi: __a : int = self.sp_model.serialized_model_proto() fi.write(__a ) return (out_vocab_file,) def __UpperCAmelCase ( self , __a , __a = "en_XX" , __a = None , __a = "ro_RO" , **__a , ): '''simple docstring''' __a : List[str] = src_lang __a : str = tgt_lang return super().prepare_seqaseq_batch(__a , __a , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCAmelCase ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = self.lang_code_to_id[src_lang] __a : List[Any] = [] __a : Dict = [self.eos_token_id, self.cur_lang_code] def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Optional[Any] = self.lang_code_to_id[lang] __a : int = [] __a : int = [self.eos_token_id, self.cur_lang_code]
27
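The MBart tokenizer row above documents, in its comment table, that the fairseq vocab and the SentencePiece model are offset by one, with four special tokens pinned at the front. The id translation then reduces to a constant offset plus an unknown-token special case; a standalone sketch, where `piece_to_id` stands in for SentencePiece's `PieceToId`:

FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # spm id + 1 == fairseq id for ordinary pieces

def spm_to_fairseq_id(token: str, piece_to_id) -> int:
    """Translate a piece to its fairseq id."""
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = piece_to_id(token)
    # SentencePiece returns 0 for unknown pieces; map that to fairseq's unk id.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]

assert spm_to_fairseq_id("</s>", lambda t: 99) == 2
assert spm_to_fairseq_id("▁de", lambda t: 8) == 9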
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = inspect.getfile(accelerate.test_utils ) __a : List[str] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 __a : Union[str, Any] = test_metrics @require_cpu def __UpperCAmelCase ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def __UpperCAmelCase ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def __UpperCAmelCase ( self ): '''simple docstring''' self.test_metrics.main() @require_multi_gpu def __UpperCAmelCase ( self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__a , env=os.environ.copy() )
27
1
'''simple docstring''' import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) __lowercase : Dict = logging.getLogger() def lowerCamelCase (_SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : list ): __a : Union[str, Any] = '\n'.join(_SCREAMING_SNAKE_CASE ) Path(_SCREAMING_SNAKE_CASE ).open('w' ).writelines(_SCREAMING_SNAKE_CASE ) __lowercase : Any = 'patrickvonplaten/t5-tiny-random' __lowercase : List[Any] = 'sshleifer/bart-tiny-random' __lowercase : Dict = 'sshleifer/tiny-mbart' __lowercase : Optional[int] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' __a : Union[str, Any] = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() __a : List[str] = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(__a , __a ) __a : int = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) __a : Tuple = 'translation_en_to_de' if model == T5_TINY else 'summarization' __a : Optional[Any] = f""" run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 """.split() with patch.object(__a , 'argv' , __a ): run_generate() assert Path(__a ).exists() # os.remove(Path(output_file_name)) def __UpperCAmelCase ( self ): '''simple docstring''' self.run_eval_tester(__a ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.run_eval_tester(__a ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' __a : Union[str, Any] = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() __a : str = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } __a : Optional[int] = Path(self.get_auto_remove_tmp_dir() ) __a : List[Any] = str(tmp_dir / 'scores.json' ) __a : Optional[Any] = str(tmp_dir / 'val.target' ) _dump_articles(__a , text['en'] ) _dump_articles(__a , text['de'] ) __a : List[Any] = 'translation_en_to_de' if model == T5_TINY else 'summarization' __a : Optional[int] = f""" run_eval_search.py {model} {str(__a )} {str(__a )} --score_path {score_path} --reference_path {reference_path} --task {task} """.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(__a , 'argv' , __a ): with CaptureStdout() as cs: run_search() __a : Optional[Any] = [' num_beams | length_penalty', model, 'Best score args'] __a : Dict = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(__a ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(__a ).exists() 
os.remove(Path(__a ) )
27
'''simple docstring'''
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / 'file.csv'
    data = textwrap.dedent('\nheader1,header2\n1,2\n10,20\n')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / 'malformed_file.csv'
    data = textwrap.dedent('\nheader1,header2\n1,2\n10,20,\n')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / 'csv_with_image.csv'
    data = textwrap.dedent(f'\nimage\n{image_file}\n')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / 'csv_with_label.csv'
    data = textwrap.dedent('\nlabel\ngood\nbad\ngood\n')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / 'csv_with_int_list.csv'
    data = textwrap.dedent('\nint_list\n1 2 3\n4 5 6\n7 8 9\n')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match='Error tokenizing data'):
        for _ in generator:
            pass
    assert any(
        record.levelname == 'ERROR'
        and 'Failed to read file' in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding='utf-8') as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='utf-8', features=Features({'image': Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('image').type == Image()()
    generated_content = pa_table.to_pydict()['image']
    assert generated_content == [{'path': image_file, 'bytes': None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding='utf-8') as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='utf-8', features=Features({'label': ClassLabel(names=['good', 'bad'])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('label').type == ClassLabel(names=['good', 'bad'])()
    generated_content = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad']).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding='utf-8', sep=',', converters={'int_list': lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field('int_list').type)
    generated_content = pa_table.to_pydict()['int_list']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
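# Standalone illustration of the `converters` hook used in the last test above:
# each raw cell string is passed through the callable before it lands in the table.
def parse_int_list(cell: str) -> list:
    return [int(i) for i in cell.split()]

assert parse_int_list('1 2 3') == [1, 2, 3]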
27
1
'''simple docstring''' import os import sys __lowercase : List[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowercase : int = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[Any] ): return AutoConfig.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoTokenizer.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any ): return AutoTokenizer.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModel.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Union[str, Any] ): return AutoModel.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[int] ): return AutoModelForCausalLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[Any] ): return AutoModelForMaskedLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Any ): return AutoModelForSequenceClassification.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : List[str] ): return AutoModelForQuestionAnswering.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
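# Simplified sketch of the docstring-forwarding decorator pattern used above;
# a stand-in for illustration, not the transformers implementation.
def add_docstrings(*docstrings):
    def decorator(fn):
        # Prepend the borrowed docstring so help() on the shortcut stays useful.
        fn.__doc__ = ''.join(docstrings) + (fn.__doc__ or '')
        return fn
    return decorator

@add_docstrings('Shortcut that forwards to a pretrained loader.\n')
def load_config(*args, **kwargs):
    '''Thin wrapper around the real loader.'''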
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Union[str, Any] = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Union[str, Any] = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
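# Minimal sketch of the lazy-import idea behind `_LazyModule` above, using
# PEP 562 module-level __getattr__; simplified, not the actual implementation.
# (This only works at the top level of a module file.)
import importlib

_demo_import_structure = {'math': ['sqrt'], 'json': ['dumps']}

def __getattr__(name):
    # Resolve the attribute on first access instead of at import time.
    for module_name, names in _demo_import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)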
27
1
'''simple docstring'''
import math


class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        # Index (0 or 1) of the weight vector closest to the sample.
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow(sample[i] - weights[0][i], 2)
            db += math.pow(sample[i] - weights[1][i], 2)
        return 0 if da > db else 1

    def update(self, weights, sample, j, alpha):
        # Pull the winning weight vector j towards the sample.
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main():
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"""Clusters that the test sample belongs to : {winner}""")
    print(f"""Weights that have been trained : {weights}""")


# running the main() function
if __name__ == "__main__":
    main()
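# Pulled out of the class above for clarity: a standalone sketch of the
# competitive-learning update rule w_j <- w_j + alpha * (x - w_j).
def update_weight(weight, sample, alpha):
    return [w + alpha * (x - w) for w, x in zip(weight, sample)]

assert update_weight([0.0, 0.0], [1.0, 1.0], 0.5) == [0.5, 0.5]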
27
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase : def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ): '''simple docstring''' __a : Optional[Any] = parent __a : int = batch_size __a : Any = num_channels __a : Optional[int] = image_size __a : Dict = patch_size __a : int = is_training __a : Union[str, Any] = use_input_mask __a : Optional[int] = use_token_type_ids __a : Dict = use_labels __a : str = vocab_size __a : List[Any] = hidden_size __a : Union[str, Any] = num_hidden_layers __a : str = num_attention_heads __a : Union[str, Any] = intermediate_size __a : Any = hidden_act __a : List[str] = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : List[Any] = max_position_embeddings __a : Tuple = type_vocab_size __a : Any = type_sequence_label_size __a : Optional[int] = initializer_range __a : Any = coordinate_size __a : List[Any] = shape_size __a : Optional[int] = num_labels __a : Dict = num_choices __a : Union[str, Any] = scope __a : Union[str, Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __a : Optional[int] = text_seq_length __a : Any = (image_size // patch_size) ** 2 + 1 __a : Dict = self.text_seq_length + self.image_seq_length def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __a : Any = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __a : List[Any] = bbox[i, j, 3] __a : Tuple = bbox[i, j, 1] __a : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __a : int = bbox[i, j, 2] __a : Dict = bbox[i, j, 0] __a : int = tmp_coordinate __a : Optional[int] = tf.constant(__a ) __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : str = None if self.use_input_mask: __a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) __a : str = None if self.use_token_type_ids: __a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __a : Optional[Any] = None __a 
: Optional[int] = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __a : int = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = TFLayoutLMvaModel(config=__a ) # text + image __a : List[Any] = model(__a , pixel_values=__a , training=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , ) __a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __a : Any = model(__a , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __a : str = model({'pixel_values': pixel_values} , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = TFLayoutLMvaForSequenceClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : str = self.num_labels __a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[Any] = 2 __a : Any = TFLayoutLMvaForQuestionAnswering(config=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.prepare_config_and_inputs() ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs __a : Any = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf 
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) A_ = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) A_ = False A_ = False A_ = False def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ): '''simple docstring''' return True def __UpperCAmelCase ( self , __a , __a , __a=False ): '''simple docstring''' __a : str = copy.deepcopy(__a ) if model_class in get_values(__a ): __a : str = { k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__a , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__a ): __a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Union[str, Any] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = TFLayoutLMvaModelTester(self ) __a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) if getattr(__a , 'hf_compute_loss' , __a ): # The number of elements in the loss should be the same as the number of elements in the label __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0] ] __a : Dict = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : Dict = prepared_for_class.pop('input_ids' ) __a : Tuple = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: __a : Union[str, Any] = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __a : List[Any] = -100 __a : List[str] = tf.convert_to_tensor(__a ) __a : Any = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = model(__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model 
correctly compute the loss with a tuple __a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) # Get keys that were added with the _prepare_for_class function __a : Dict = prepared_for_class.keys() - inputs_dict.keys() __a : Any = inspect.signature(model.call ).parameters __a : str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __a : List[Any] = {0: 'input_ids'} for label_key in label_keys: __a : List[Any] = signature_names.index(__a ) __a : Union[str, Any] = label_key __a : List[str] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __a : Union[str, Any] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __a : Optional[Any] = prepared_for_class[value] __a : str = tuple(__a ) # Send to model __a : Tuple = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a : Any = type self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __a , __a , __a , __a , __a , __a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) __a : Tuple = self.default_image_processor __a : List[Any] = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values __a : Union[str, Any] = tf.constant([[1, 2]] ) __a : Optional[Any] = 
tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a ) # verify the logits __a : List[Any] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , __a ) __a : Optional[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
27
1
'''simple docstring'''
from __future__ import annotations


def solution(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
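# Hypothetical usage of the backtracking solver above (function names as
# restored here); 0 marks an open cell, 1 a blocked one.
demo_maze = [
    [0, 1],
    [0, 0],
]
solution(demo_maze)  # prints the visited-path matrix and returns True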
27
'''simple docstring''' import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('.') def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): __a : Any = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ' F"""{test_file} instead.""" ) __a : Tuple = components[-1] if not test_fn.endswith('py' ): raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith('test_modeling_' ): raise ValueError( F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) __a : List[str] = components[:-1] + [test_fn.replace('.py' , '' )] __a : Optional[Any] = '.'.join(_SCREAMING_SNAKE_CASE ) return test_module_path def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): __a : List[str] = get_module_path(_SCREAMING_SNAKE_CASE ) __a : Dict = importlib.import_module(_SCREAMING_SNAKE_CASE ) return test_module def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): __a : List[str] = [] __a : List[str] = get_test_module(_SCREAMING_SNAKE_CASE ) for attr in dir(_SCREAMING_SNAKE_CASE ): if attr.endswith('ModelTester' ): tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): __a : Any = [] __a : str = get_test_module(_SCREAMING_SNAKE_CASE ) for attr in dir(_SCREAMING_SNAKE_CASE ): __a : int = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'all_model_classes' , [] ) if len(_SCREAMING_SNAKE_CASE ) > 0: test_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : str = get_test_classes(_SCREAMING_SNAKE_CASE ) __a : Any = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Tuple = test_class() if hasattr(_SCREAMING_SNAKE_CASE , 'setUp' ): test.setUp() __a : List[Any] = None if hasattr(_SCREAMING_SNAKE_CASE , 'model_tester' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __a : List[str] = test.model_tester.__class__ return model_tester def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : str = get_test_classes(_SCREAMING_SNAKE_CASE ) __a : int = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] ): __a : List[Any] = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Any = [] for test_class in test_classes: __a : Any = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) if tester_class is not None: tester_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): __a : str = get_test_classes(_SCREAMING_SNAKE_CASE ) __a : int = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes} return test_tester_mapping def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = { model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes } return model_test_mapping def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE ) __a : str = { model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes } return model_to_tester_mapping def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return o elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return o.__name__ elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ): return [to_json(_SCREAMING_SNAKE_CASE ) for x in o] elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()} else: return o
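# Standalone sketch of the reflection pattern the helpers above rely on:
# scan a module's attributes and keep classes whose names end with a suffix.
import types

def find_classes_by_suffix(module: types.ModuleType, suffix: str) -> list:
    found = []
    for attr in dir(module):
        obj = getattr(module, attr)
        if isinstance(obj, type) and attr.endswith(suffix):
            found.append(obj)
    # sort with class names, as the helpers above do
    return sorted(found, key=lambda c: c.__name__)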
27
1
'''simple docstring'''
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return ''.join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if check1[i] == '$':
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main():
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n'
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
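# Worked example of the bit-string encoding used by decimal_to_binary above,
# shown for an integer-valued minterm (5 with three variables -> '101').
def to_bits(minterm: int, width: int) -> str:
    out = ''
    for _ in range(width):
        out = str(minterm % 2) + out
        minterm //= 2
    return out

assert to_bits(5, 3) == '101'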
27
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = StableDiffusionInpaintPipeline A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A_ = frozenset([] ) def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) __a : str = PNDMScheduler(skip_prk_steps=__a ) torch.manual_seed(0 ) __a : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) __a : Dict = CLIPTextModel(__a ) __a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __a : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __UpperCAmelCase ( self , __a , __a=0 ): '''simple docstring''' __a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) __a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __a : Tuple = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) ) __a : Tuple = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) ) if str(__a ).startswith('mps' ): __a : Any = torch.manual_seed(__a ) else: __a : str = torch.Generator(device=__a ).manual_seed(__a ) __a : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : str = self.get_dummy_components() __a : Union[str, Any] = StableDiffusionInpaintPipeline(**__a ) __a : List[Any] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) __a : List[Any] = self.get_dummy_inputs(__a ) __a : Dict = 
sd_pipe(**__a ).images __a : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __a : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy' ) __a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting' __a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() __a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : Tuple = torch.manual_seed(0 ) __a : int = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , ) __a : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy' ) __a : str = 'stabilityai/stable-diffusion-2-inpainting' __a : List[str] = StableDiffusionInpaintPipeline.from_pretrained( __a , torch_dtype=torch.floataa , safety_checker=__a , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() __a : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : int = torch.manual_seed(0 ) __a : Optional[Any] = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , ) __a : int = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __UpperCAmelCase ( self ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) __a : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) __a : str = 'stabilityai/stable-diffusion-2-inpainting' __a : Any = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' ) __a : str = StableDiffusionInpaintPipeline.from_pretrained( __a , safety_checker=__a , scheduler=__a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) 
pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __a : str = 'Face of a yellow cat, high resolution, sitting on a park bench' __a : Tuple = torch.manual_seed(0 ) __a : str = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='np' , ) __a : List[str] = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
27
1
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowercase : Tuple = pytest.mark.integration __lowercase : Optional[int] = {'comet'} __lowercase : List[str] = importlib.util.find_spec('fairseq') is not None __lowercase : str = {'code_eval'} __lowercase : List[Any] = os.name == 'nt' __lowercase : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'} __lowercase : Optional[Any] = importlib.util.find_spec('transformers') is not None def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : int , _SCREAMING_SNAKE_CASE : List[Any] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (): __a : List[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @local class __UpperCamelCase ( parameterized.TestCase ): A_ = {} A_ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = '[...]' __a : Tuple = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) __a : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__a ) # check parameters __a : Dict = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__a , metric_module.__name__ ): with self.use_local_metrics(): try: __a : str = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = '[...]' __a : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) # run doctest with self.use_local_metrics(): __a : List[Any] = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) 
self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ): yield else: yield @contextmanager def __UpperCAmelCase ( self ): '''simple docstring''' def load_local_metric(__a , *__a , **__a ): return load_metric(os.path.join('metrics' , __a ) , *__a , **__a ) with patch('datasets.load_metric' ) as mock_load_metric: __a : Dict = load_local_metric yield @classmethod def __UpperCAmelCase ( cls , __a ): '''simple docstring''' def wrapper(__a ): __a : Optional[Any] = contextmanager(__a ) __a : str = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: __a : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): import torch def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Optional[int] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: __a : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Optional[int] ): class __UpperCamelCase : def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' assert len(__a ) == 2 __a : Dict = [0.19, 0.92] return scores, sum(__a ) / len(__a ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: __a : str = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: __a : int = load_from_checkpoint yield def lowerCamelCase (): __a : Optional[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) ) __a : List[str] = 'ERROR' __a : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
27
'''simple docstring'''
import requests

APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
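# Note on the `params=locals()` idiom above: it forwards every local name of
# the function as a query parameter. An offline, self-contained illustration:
def build_params(q: str = 'Chicago', appid: str = 'KEY') -> dict:
    return dict(locals())

assert build_params() == {'q': 'Chicago', 'appid': 'KEY'}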
27
1
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional @dataclass class __UpperCamelCase : A_ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} ) A_ = field( default="./" , metadata={"help": "Save dir where model repo is cloned and model updates are saved to."} ) A_ = field( default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} ) A_ = field( default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} ) A_ = field(default=2 , metadata={"help": "Batch size for training."} ) A_ = field(default=2 , metadata={"help": "Batch size for evaluation."} ) A_ = field(default=0.1 , metadata={"help": "Value of weight decay."} ) A_ = field( default=10000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} ) A_ = field(default=2e-4 , metadata={"help": "Learning rate for training."} ) A_ = field(default="cosine" , metadata={"help": "Learning rate schedule type."} ) A_ = field( default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} ) A_ = field( default=16 , metadata={"help": "Number of gradient accumulation steps."} ) A_ = field( default=lowerCAmelCase_ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} ) A_ = field(default=50000 , metadata={"help": "Maximum number of training steps."} ) A_ = field( default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} ) A_ = field(default=1024 , metadata={"help": "Sequence lengths used for training."} ) A_ = field(default=1 , metadata={"help": "Training seed."} ) A_ = field( default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , ) A_ = field( default=lowerCAmelCase_ , metadata={"help": "States path if the training should continue from a checkpoint folder."} ) A_ = field(default=lowerCAmelCase_ , metadata={"help": "If True the data is pretokenized."} ) @dataclass class __UpperCamelCase : A_ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} ) A_ = field( default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} ) A_ = field(default=2 , metadata={"help": "Batch size used for evaluation."} ) A_ = field( default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} ) A_ = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} ) A_ = field(default=1 , metadata={"help": "Random seed used for evaluation."} ) @dataclass class __UpperCamelCase : A_ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} ) A_ = field(default=lowerCAmelCase_ , metadata={"help": "Number of workers used for code evaluation."} ) A_ = field( default=lowerCAmelCase_ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , ) A_ = field( default=lowerCAmelCase_ , metadata={"help": "Sample from the language model's output distribution."} ) A_ = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} ) A_ = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} ) A_ = field(default=0 , metadata={"help": "Top-k parameter used for generation."} ) A_ = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} ) A_ = field(default=10 , metadata={"help": "Number of generations to run in parallel."} ) A_ = field( default=200 , metadata={"help": "Number of completions to generate for each sample."} ) A_ = field(default=1 , metadata={"help": "Random seed used for evaluation."} ) A_ = field( default="eval_results.json" , metadata={"help": "File to save evaluation results to."} ) A_ = field( default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} ) A_ = field( default=-1 , metadata={ "help": ( "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive" " number corresponds to which GPU device id to run on." ) } , ) @dataclass class __UpperCamelCase : A_ = field( default=lowerCAmelCase_ , metadata={ "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available." } , ) A_ = field( default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} ) A_ = field( default="codeparrot-clean" , metadata={"help": "Folder to save processed dataset."} ) A_ = field( default=100000 , metadata={"help": "Number of files to save per JSON output file."} ) A_ = field(default="content" , metadata={"help": "Column containing text data to process."} ) A_ = field( default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} ) A_ = field( default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} ) A_ = field( default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} ) A_ = field( default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} ) A_ = field( default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} ) A_ = field( default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , ) A_ = field( default=lowerCAmelCase_ , metadata={"help": "If True, near-duplicate samples are removed."} ) A_ = field( default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} ) @dataclass class __UpperCamelCase : A_ = field( default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} ) A_ = field( default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} ) A_ = field(default="content" , metadata={"help": "Column containing text data to process."} ) A_ = field(default=200000 , metadata={"help": "Number of examples to train tokenizer on."} ) A_ = field( default=32768 , metadata={"help": "Vocabulary size of the new tokenizer."} ) A_ = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} ) A_ = field(default=lowerCAmelCase_ , metadata={"help": "Push saved tokenizer to the hub."} ) @dataclass class __UpperCamelCase : A_ = field( default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} ) A_ = field( default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} ) A_ = field( default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} ) A_ = field(default=lowerCAmelCase_ , metadata={"help": "Number of workers used for pretokenization."} ) @dataclass class __UpperCamelCase : A_ = field( default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} ) A_ = field( default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} ) A_ = field(default="codeparrot" , metadata={"help": "Name of the created model."} ) A_ = field(default=lowerCAmelCase_ , metadata={"help": "Push saved model to the hub."} )
27
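Dataclasses like the ones above are typically fed to `transformers.HfArgumentParser`, which turns every field into a CLI flag and its `metadata["help"]` into the flag's help text. A minimal sketch with a hypothetical two-field config:

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class MyTrainingArguments:  # stand-in for the argument classes above
    model_ckpt: str = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    train_batch_size: int = field(default=2, metadata={"help": "Batch size for training."})

parser = HfArgumentParser(MyTrainingArguments)
(args,) = parser.parse_args_into_dataclasses()  # e.g. invoked with --train_batch_size 4
print(args.model_ckpt, args.train_batch_size)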
'''simple docstring''' import torch from transformers import AutoModel class __UpperCamelCase ( torch.nn.Module ): def __init__( self , __a="sayef/fsner-bert-base-uncased" ): '''simple docstring''' super(__a , self ).__init__() __a : Tuple = AutoModel.from_pretrained(__a , return_dict=__a ) __a : int = torch.nn.CosineSimilarity(3 , 1E-0_8 ) __a : Union[str, Any] = torch.nn.Softmax(dim=1 ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return self.bert(**__a ).last_hidden_state def __UpperCAmelCase ( self , __a ): '''simple docstring''' return token_embeddings.sum(2 , keepdim=__a ) def __UpperCAmelCase ( self , __a , __a , __a=1 ): '''simple docstring''' return self.softmax(T * self.cos(__a , __a ) ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' __a : str = W_supports['sizes'].tolist() __a : Union[str, Any] = W_supports['start_token_id'].item() __a : Any = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] __a : Tuple = self.BERT(**__a ) __a : str = self.BERT(**__a ) __a : Any = None __a : Dict = None __a : Dict = W_supports['input_ids'] == start_token_id __a : Union[str, Any] = W_supports['input_ids'] == end_token_id for i, size in enumerate(__a ): if i == 0: __a : Optional[int] = 0 else: __a : Union[str, Any] = support_sizes[i - 1] __a : int = S[s : s + size][start_token_masks[s : s + size]] __a : Union[str, Any] = S[s : s + size][end_token_masks[s : s + size]] __a : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) __a : Dict = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: __a : str = torch.vstack((p_starts, p_start) ) __a : str = torch.vstack((p_ends, p_end) ) else: __a : List[str] = p_start __a : int = p_end return p_starts, p_ends
27
1
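The scoring step in the model above is a softmax over a temperature-scaled cosine similarity. A shape-only sketch of that operation (the dimensions are illustrative, not the model's actual ones):

import torch

cos = torch.nn.CosineSimilarity(dim=3, eps=1e-8)
softmax = torch.nn.Softmax(dim=1)
T = 1.0  # temperature

query = torch.randn(2, 5, 1, 16)    # (batch, query tokens, 1, hidden)
support = torch.randn(2, 1, 7, 16)  # (batch, 1, support tokens, hidden)
scores = softmax(T * cos(query, support))  # broadcasts to (2, 5, 7)
print(scores.shape)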
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a , __a=1024 , __a=1024 , __a=3.6 ): '''simple docstring''' __a : List[str] = tokenizer __a : List[Any] = tokenizer.bos_token_id __a : Tuple = dataset __a : Optional[int] = seq_length __a : Optional[int] = seq_length * chars_per_token * num_of_sequences def __iter__( self ): '''simple docstring''' __a : Optional[Any] = iter(self.dataset ) __a : Optional[int] = True while more_examples: __a , __a : Optional[int] = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(__a )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: __a : str = False break __a : Optional[int] = tokenizer(__a , truncation=__a )['input_ids'] __a : str = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(__a ) , self.seq_length ): __a : int = all_token_ids[i : i + self.seq_length] if len(__a ) == self.seq_length: yield torch.tensor(__a ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : List[Any] = {'streaming': True} __a : int = load_dataset(args.dataset_name , split='train' , **_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = ConstantLengthDataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , seq_length=args.seq_length ) __a : Optional[Any] = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=args.batch_size ) return eval_dataloader def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): model.eval() __a : Any = [] for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): with torch.no_grad(): __a : str = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) __a : int = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(_SCREAMING_SNAKE_CASE ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __a : Union[str, Any] = torch.mean(torch.cat(_SCREAMING_SNAKE_CASE ) ) try: __a : int = torch.exp(_SCREAMING_SNAKE_CASE ) except OverflowError: __a : str = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator __lowercase : List[Any] = Accelerator() # Parse configuration __lowercase : Optional[int] = HfArgumentParser(EvaluationArguments) __lowercase : int = parser.parse_args() set_seed(args.seed) # Logging __lowercase : str = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer __lowercase : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt) __lowercase : Dict = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader __lowercase : List[str] = create_dataloader(args) # Prepare everything with our `accelerator`. __lowercase , __lowercase : Any = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') __lowercase , __lowercase : List[str] = evaluate(args) logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
27
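The final metric in the script above is perplexity, i.e. the exponential of the mean cross-entropy loss gathered across batches. A tiny sketch of that last step (the OverflowError guard mirrors the script's behavior):

import torch

losses = torch.tensor([2.1, 2.3, 1.9])  # per-batch losses, made up for illustration
loss = torch.mean(losses)
try:
    perplexity = torch.exp(loss)
except OverflowError:
    perplexity = torch.tensor(float("inf"))
print(loss.item(), perplexity.item())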
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : int = int(number**0.5 ) return number == sq * sq def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): __a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __a : int = x_den * y_den * z_den __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) top //= hcf bottom //= hcf return top, bottom def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 35 ): __a : set = set() __a : int __a : Fraction = Fraction(0 ) __a : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 __a : Union[str, Any] = x_num * y_den + x_den * y_num __a : Optional[Any] = x_den * y_den __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 __a : Optional[int] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __a : Union[str, Any] = x_den * x_den * y_den * y_den if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): __a : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[Any] = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=-1 __a : int = x_num * y_num __a : Optional[Any] = x_den * y_num + x_num * y_den __a : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 __a : List[Any] = x_num * x_num * y_num * y_num __a : List[Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): __a : Optional[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Union[str, Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[str] = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) for num, den in unique_s: total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
27
1
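Each candidate z in the search above is reduced with gcd before being deduplicated in the set. A worked instance of the n=1 branch, assuming x = 1/2 and y = 1/3:

from math import gcd

x_num, x_den, y_num, y_den = 1, 2, 1, 3
z_num = x_num * y_den + x_den * y_num  # numerator of x + y
z_den = x_den * y_den
hcf = gcd(z_num, z_den)
print(z_num // hcf, z_den // hcf)  # 5 6, i.e. z = 5/6 in lowest terms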
'''simple docstring''' import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = LayoutLMTokenizer A_ = LayoutLMTokenizerFast A_ = True A_ = True def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() __a : List[Any] = [ '[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = 'UNwant\u00E9d,running' __a : Tuple = 'unwanted, running' return input_text, output_text def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.tokenizer_class(self.vocab_file ) __a : Any = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [7, 4, 5, 10, 8, 9] ) def __UpperCAmelCase ( self ): '''simple docstring''' pass
27
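The expected tokens in the test come from standard WordPiece splitting over the tiny vocabulary defined in setUp. The same behavior can be reproduced with any BERT-style tokenizer (sketch; the vocab is the one from the test):

import os, tempfile
from transformers import BertTokenizer  # LayoutLMTokenizer shares this WordPiece logic

vocab = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write("\n".join(vocab) + "\n")
    tok = BertTokenizer(path)
    print(tok.tokenize("UNwant\u00e9d,running"))  # ['un', '##want', '##ed', ',', 'runn', '##ing']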
'''simple docstring''' import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def __UpperCAmelCase ( self ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = ort.SessionOptions() __a : Dict = False return options def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/overture-creations-5sI6fQgYIuo.png' ) __a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' ) __a : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' ) # using the PNDM scheduler by default __a : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__a ) __a : Tuple = 'A red cat sitting on a park bench' __a : int = np.random.RandomState(0 ) __a : Tuple = pipe( prompt=__a , image=__a , mask_image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__a , output_type='np' , ) __a : Tuple = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
27
1
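The test's two properties configure ONNX Runtime rather than the pipeline itself: a CUDA provider tuple with per-provider options, and a SessionOptions object. A hedged sketch of the same setup (the boolean being disabled is assumed to be memory-pattern reuse, and the model path is a placeholder):

import onnxruntime as ort

options = ort.SessionOptions()
options.enable_mem_pattern = False  # assumption: this is the flag the test sets to False
provider = ("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"})
# sess = ort.InferenceSession("model.onnx", sess_options=options, providers=[provider])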
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowercase : Dict = logging.get_logger(__name__) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any ): __a : List[str] = original_name.split('.' )[0] __a : Tuple = key.split('.' ) __a : Any = int(key_list[key_list.index(_SCREAMING_SNAKE_CASE ) - 2] ) __a : Union[str, Any] = int(key_list[key_list.index(_SCREAMING_SNAKE_CASE ) - 1] ) __a : List[Any] = orig_block_num - offset __a : List[Any] = key.replace(F"""{orig_block_num}.{layer_num}.{original_name}""" , F"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : Optional[int] = OrderedDict() __a , __a : Union[str, Any] = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): __a : Optional[int] = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 __a : Dict = key[: key.find('proj' )] __a : Union[str, Any] = key.replace(_SCREAMING_SNAKE_CASE , F"""patch_embeddings.{total_embed_found}.""" ) __a : List[str] = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: __a : str = 'poolformer.encoder.' + key if "mlp.fc1" in key: __a : Any = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: __a : str = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: __a : Tuple = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'norm1' , 'before_norm' ) if "norm2" in key: __a : str = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: __a : Any = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: __a : Tuple = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: __a : List[str] = key.replace('head' , 'classifier' ) __a : int = value return new_state_dict def lowerCamelCase (): __a : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' __a : Optional[int] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Tuple = PoolFormerConfig() # set attributes based on model_name __a : str = 'huggingface/label-files' __a : str = model_name[-3:] __a : Optional[int] = 1_000 __a : Optional[int] = 'imagenet-1k-id2label.json' __a : Any = (1, 1_000) # set config attributes __a : Optional[int] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : Optional[Any] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : Optional[Any] = idalabel __a : Optional[Any] = {v: k for k, v in idalabel.items()} if size == "s12": __a : int = [2, 2, 6, 2] __a : str = [64, 128, 320, 512] __a : Tuple = 4.0 __a : List[Any] = 0.9 elif size == "s24": __a : Union[str, Any] = [4, 4, 12, 4] __a : str = [64, 128, 320, 512] __a : Optional[Any] = 4.0 __a : Tuple = 0.9 elif size == "s36": __a : str = [6, 6, 18, 6] __a : str = [64, 128, 320, 512] __a : str = 4.0 __a : Any = 1e-6 __a : int = 0.9 elif size == "m36": __a : Any = [6, 6, 18, 6] __a : str = [96, 192, 384, 768] __a : Dict = 4.0 __a : Optional[Any] = 1e-6 __a : List[str] = 0.9_5 elif size == "m48": __a : Union[str, Any] = [8, 8, 24, 8] __a : List[str] = [96, 192, 384, 768] __a : Tuple = 4.0 __a : List[Any] = 1e-6 __a : int = 0.9_5 else: raise ValueError(F"""Size {size} not supported""" ) # load image processor __a : List[Any] = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE ) # Prepare image __a : Dict = prepare_img() __a : Dict = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict __a : Optional[int] = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device('cpu' ) ) # rename keys __a : Optional[int] = rename_keys(_SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict __a : Any = PoolFormerForImageClassification(_SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.eval() # Define image processor __a : Tuple = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE ) __a : Tuple = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass __a : int = model(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = outputs.logits # define expected logit slices for different models if size == "s12": __a : str = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] ) elif size == "s24": __a : Tuple = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] ) elif size == "s36": __a : List[Any] = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] ) elif size == "m36": __a : Any = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] ) elif size == "m48": __a : Dict = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] ) else: raise ValueError(F"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __lowercase : Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
27
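At its core the conversion above is a key-renaming pass over a checkpoint's state dict. A generic sketch of that pattern (the rules here are illustrative; the real mapping is the offset-aware logic above):

import torch

def rename(key: str) -> str:
    return key.replace("mlp.fc1", "output.conv1").replace("head", "classifier")

old_sd = {"mlp.fc1.weight": torch.zeros(2, 2), "head.bias": torch.zeros(2)}
new_sd = {rename(k): v for k, v in old_sd.items()}
print(sorted(new_sd))  # ['classifier.bias', 'output.conv1.weight']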
'''simple docstring''' import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowercase : Dict = 16 __lowercase : List[Any] = 32 def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return int(x / 2**20 ) class __UpperCamelCase : def __enter__( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero __a : Optional[int] = torch.cuda.memory_allocated() return self def __exit__( self , *__a ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() __a : Dict = torch.cuda.memory_allocated() __a : List[Any] = torch.cuda.max_memory_allocated() __a : Tuple = bamb(self.end - self.begin ) __a : Tuple = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def lowerCamelCase (_SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : str = "bert-base-cased" , _SCREAMING_SNAKE_CASE : int = 320 , _SCREAMING_SNAKE_CASE : int = 160 , ): __a : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : List[Any] = load_dataset( 'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} ) def tokenize_function(_SCREAMING_SNAKE_CASE : Tuple ): # max_length=None => use the model max length (it's actually the default) __a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __a : List[str] = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __a : Tuple = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_SCREAMING_SNAKE_CASE : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. __a : int = DataLoader( tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) __a : Tuple = DataLoader( tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Initialize accelerator __a : str = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __a : Dict = config['lr'] __a : str = int(config['num_epochs'] ) __a : Optional[int] = int(config['seed'] ) __a : Any = int(config['batch_size'] ) __a : List[str] = args.model_name_or_path set_seed(_SCREAMING_SNAKE_CASE ) __a , __a : int = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) # Instantiate optimizer __a : Optional[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __a : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) if accelerator.state.deepspeed_plugin is not None: __a : int = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __a : Union[str, Any] = 1 __a : Tuple = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __a : str = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , ) else: __a : List[Any] = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __a , __a , __a , __a , __a : Optional[Any] = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over __a : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly __a : Dict = 0 # Now we train the model __a : str = {} for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): __a : List[Any] = model(**_SCREAMING_SNAKE_CASE ) __a : str = outputs.loss __a : str = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) __a : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_SCREAMING_SNAKE_CASE , ) parser.add_argument( '--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_SCREAMING_SNAKE_CASE , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_SCREAMING_SNAKE_CASE , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of train epochs.' , ) __a : List[str] = parser.parse_args() __a : List[Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
27
1
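The context manager in the script brackets a training epoch with CUDA memory counters. A minimal standalone version of the same pattern (requires a CUDA device; the training call is hypothetical):

import gc
import torch

class PeakMemory:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # zero the peak gauge
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        peak = torch.cuda.max_memory_allocated()
        self.peaked_mb = (peak - self.begin) // 2**20  # same MB conversion as bamb()

# with PeakMemory() as m:
#     train_one_epoch()  # placeholder for the training loop
# print(m.peaked_mb)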
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowercase : Dict = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = ['YolosFeatureExtractor'] __lowercase : Optional[Any] = ['YolosImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : int = [ 'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST', 'YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys __lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
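The _LazyModule wiring above defers the heavy torch/vision imports until an attribute is first accessed. The same effect can be sketched in plain Python with a module-level __getattr__ (PEP 562); this would live in a package's __init__.py, and the names are hypothetical:

import importlib

_import_structure = {"modeling_yolos": ["YolosModel"]}  # submodule -> symbols map, simplified

def __getattr__(name):
    for module, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(f".{module}", __name__), name)
    raise AttributeError(name)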
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __lowercase : List[Any] = 'bart' __lowercase : Union[str, Any] = True @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): if LOAD_DENSE_INDEX: __a : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) __a : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) __a : Optional[int] = qar_model.eval() else: __a , __a : str = (None, None) if MODEL_TYPE == "bart": __a : Union[str, Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) __a : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) __a : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) __a : str = sas_model.eval() else: __a , __a : Tuple = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): if LOAD_DENSE_INDEX: __a : Optional[Any] = faiss.StandardGpuResources() __a : Dict = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train'] __a : int = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) __a : int = faiss.IndexFlatIP(128 ) __a : Any = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE ) wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU else: __a , __a : str = (None, None) __a : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE ) def lowerCamelCase (): __a : Dict = datasets.load_dataset('eli5' , name='LFQA_reddit' ) __a : Dict = elia['train_eli5'] __a : Optional[int] = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) ) __a : str = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_SCREAMING_SNAKE_CASE ) return (elia_train, eli5_train_q_index) __lowercase , __lowercase , __lowercase : Any = load_indexes() __lowercase , __lowercase , __lowercase , __lowercase : Dict = load_models() __lowercase , __lowercase : int = load_train_data() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str]=10 ): __a : Optional[int] = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a , __a : Union[str, Any] = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Any = [elia_train[int(_SCREAMING_SNAKE_CASE )] for i in I[0]] return nn_examples def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str="wiki40b" , _SCREAMING_SNAKE_CASE : List[str]="dense" , _SCREAMING_SNAKE_CASE : Any=10 ): if source == "none": __a , __a : Any = (' <P> '.join(['' for _ in range(11 )] ).strip(), []) else: if method == "dense": __a , __a : str = query_qa_dense_index( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a , __a : Union[str, Any] = query_es_index( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , ) __a : Dict = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] __a : Any = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None), } ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.9_5 , _SCREAMING_SNAKE_CASE : str=0.8 ): with torch.no_grad(): __a : Union[str, Any] = qa_sas_generate( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar __lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' __lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) __lowercase : Dict = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] __lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options') if demo_options: __lowercase : Any = st.sidebar.selectbox( '', action_list, index=3, ) __lowercase : Tuple = action_list.index(action_st) __lowercase : Tuple = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) __lowercase : List[Any] = show_type == 'Show full text of passages' else: __lowercase : int = 3 __lowercase : str = True __lowercase : Tuple = st.sidebar.checkbox('Retrieval options') if retrieval_options: __lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) __lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) __lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: __lowercase : str = 'wiki40b' __lowercase : List[Any] = 'dense' __lowercase : Dict = 'beam' __lowercase : Optional[int] = 2 __lowercase : List[str] = 64 __lowercase : Tuple = 2_56 __lowercase : List[str] = None __lowercase : Tuple = None __lowercase : List[Any] = st.sidebar.checkbox('Generation options') if generate_options: __lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) __lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) __lowercase : Tuple = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) __lowercase : int = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": __lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __lowercase : Dict = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __lowercase : Union[str, Any] = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __lowercase : List[str] = None # start main text __lowercase : int = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] __lowercase : Optional[int] = st.selectbox( 'What would you like to ask? ---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": __lowercase : Any = st.text_input('Enter your question here:', '') else: __lowercase : Any = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": __lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10) __lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10) __lowercase : Optional[int] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __lowercase : str = support_list[:10] __lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: __lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __lowercase , __lowercase : int = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): __lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) __lowercase : Any = res[1].strip() if sec_titles == "": __lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url) else: __lowercase : Union[str, Any] = sec_titles.split(' & ') __lowercase : str = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: __lowercase : str = find_nearest_training(question) __lowercase : Optional[int] = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) __lowercase : Any = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) __lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
27
1
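Dense retrieval in the demo is max-inner-product search over a flat FAISS index. A standalone sketch of the indexing and query calls it relies on (random vectors stand in for the real passage embeddings):

import numpy as np
import faiss

d = 128
passages = np.random.rand(1000, d).astype("float32")
index = faiss.IndexFlatIP(d)
index.add(passages)
queries = passages[:2]
D, I = index.search(queries, 10)  # scores and row ids of the 10 best matches
print(I[0][:3])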
'''simple docstring''' from __future__ import annotations from math import gcd def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int = 2 , _SCREAMING_SNAKE_CASE : int = 1 , _SCREAMING_SNAKE_CASE : int = 3 , ): # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError('The input value cannot be less than 2' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int: return (pow(_SCREAMING_SNAKE_CASE , 2 ) + step) % modulus for _ in range(_SCREAMING_SNAKE_CASE ): # These track the position within the cycle detection logic. __a : int = seed __a : Tuple = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. __a : List[str] = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[int] = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. __a : List[str] = gcd(hare - tortoise , _SCREAMING_SNAKE_CASE ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. __a : List[str] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. return None if __name__ == "__main__": import argparse __lowercase : Any = argparse.ArgumentParser() parser.add_argument( 'num', type=int, help='The value to find a divisor of', ) parser.add_argument( '--attempts', type=int, default=3, help='The number of attempts before giving up', ) __lowercase : List[str] = parser.parse_args() __lowercase : List[str] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f'''{args.num} is probably prime''') else: __lowercase : List[str] = args.num // divisor print(f'''{args.num} = {divisor} * {quotient}''')
27
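A short usage sketch, using the `pollard_rho` name the command-line block above calls the function by; 8051 = 83 * 97 is a classic test case, and either factor may be returned:

divisor = pollard_rho(8051)
if divisor is None:
    print("no divisor found, try more attempts")
else:
    print(divisor, 8051 // divisor)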
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowercase : Tuple = logging.get_logger(__name__) __lowercase : List[Any] = torch.device('cpu') def lowerCamelCase (): __a : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' __a : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : int = dct.pop(_SCREAMING_SNAKE_CASE ) __a : Tuple = val def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : Dict = [] for k in state_dict.keys(): __a : List[Any] = k if ".pwconv" in k: __a : List[Any] = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: __a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: __a : Optional[int] = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: __a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: __a : Union[str, Any] = k_new.split('.' ) if ls[2].isdigit(): __a : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: __a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : Union[str, Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __a : List[str] = 1_000 __a : Tuple = 'huggingface/label-files' __a : str = 'imagenet-1k-id2label.json' __a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : Any = idalabel __a : str = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __a : Dict = [3, 3, 6, 4] __a : int = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __a : Dict = [3, 3, 9, 6] __a : List[str] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __a : Dict = [4, 3, 10, 5] __a : Optional[int] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __a : Tuple = [4, 4, 12, 6] __a : Dict = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): __a : List[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE ) else: __a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) __a : Optional[Any] = checkpoint __a : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model __a : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs __a : Tuple = prepare_img() __a : str = ViTImageProcessor.from_pretrained('preprocessor_config' ) __a : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models __a : List[Any] = get_expected_output(_SCREAMING_SNAKE_CASE ) __a : Dict = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') __lowercase : Tuple = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
27
1
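The conversion above is accepted only if a slice of the new model's logits matches the reference values within an absolute tolerance; that check is a plain `torch.allclose`:

import torch

hf_logits = torch.tensor([[-2.1705, 2.1110, -2.0815]])      # made-up model output
expected = torch.tensor([-2.1703, 2.1107, -2.0811])         # reference slice, as above
print(torch.allclose(hf_logits[0, :3], expected, atol=1e-3))  # True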
'''simple docstring''' import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __lowercase : List[Any] = 2 class __UpperCamelCase : def __init__( self , *, # begin keyword-only arguments __a="<s>" , __a="<pad>" , __a="</s>" , __a="<unk>" , __a=None , ): '''simple docstring''' __a , __a , __a , __a : Dict = bos, unk, pad, eos __a : Any = [] __a : List[str] = [] __a : List[str] = {} __a : Any = self.add_symbol(__a ) __a : int = self.add_symbol(__a ) __a : str = self.add_symbol(__a ) __a : Optional[Any] = self.add_symbol(__a ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(__a ) __a : Tuple = len(self.symbols ) def __eq__( self , __a ): '''simple docstring''' return self.indices == other.indices def __getitem__( self , __a ): '''simple docstring''' if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ): '''simple docstring''' return len(self.symbols ) def __contains__( self , __a ): '''simple docstring''' return sym in self.indices @classmethod def __UpperCAmelCase ( cls , __a ): '''simple docstring''' __a : Tuple = cls() d.add_from_file(__a ) return d def __UpperCAmelCase ( self , __a , __a=1 , __a=False ): '''simple docstring''' if word in self.indices and not overwrite: __a : List[Any] = self.indices[word] __a : List[Any] = self.count[idx] + n return idx else: __a : Union[str, Any] = len(self.symbols ) __a : Tuple = idx self.symbols.append(__a ) self.count.append(__a ) return idx def __UpperCAmelCase ( self , __a ): '''simple docstring''' return 0 def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(__a , __a ): try: with open(__a , 'r' , encoding='utf-8' ) as fd: self.add_from_file(__a ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(__a ) ) return __a : List[Any] = f.readlines() __a : str = self._load_meta(__a ) for line in lines[indices_start_line:]: try: __a , __a : Optional[int] = line.rstrip().rsplit(' ' , 1 ) if field == "#fairseq:overwrite": __a : Optional[int] = True __a , __a : Optional[Any] = line.rsplit(' ' , 1 ) else: __a : List[str] = False __a : Any = int(__a ) __a : Optional[int] = line if word in self and not overwrite: raise RuntimeError( 'Duplicate word found when loading Dictionary: \'{}\'. ' 'Duplicate words can overwrite earlier ones by adding the ' '#fairseq:overwrite flag at the end of the corresponding row ' 'in the dictionary file. 
If using the Camembert model, please ' 'download an updated copy of the model file.'.format(__a ) ) self.add_symbol(__a , n=__a , overwrite=__a ) except ValueError: raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} __a : Tuple = dict((re.sub(r'@@$' , '' , _SCREAMING_SNAKE_CASE ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _SCREAMING_SNAKE_CASE ), v) for k, v in d.items() ) __a : str = '<s> <pad> </s> <unk>'.split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] __a : str = d[k] # restore return da def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict ): # prep if not os.path.exists(_SCREAMING_SNAKE_CASE ): raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models __a : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'checkpoint.pt' ) if not os.path.isfile(_SCREAMING_SNAKE_CASE ): raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" ) __a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) __a : List[Any] = chkpt['cfg']['model'] # dicts __a : str = os.path.join(_SCREAMING_SNAKE_CASE , 'dict.txt' ) if not os.path.isfile(_SCREAMING_SNAKE_CASE ): raise ValueError(F"""path to the file {dict_file} does not exist!""" ) __a : int = Dictionary.load(_SCREAMING_SNAKE_CASE ) __a : List[Any] = rewrite_dict_keys(src_dict.indices ) __a : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) __a : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES['vocab_file'] ) print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) ) # merges_file (bpecodes) __a : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'bpecodes' ) if not os.path.isfile(_SCREAMING_SNAKE_CASE ): raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" ) __a : Any = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES['merges_file'] ) shutil.copyfile(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # model config __a : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) __a : str = { 'activation_dropout': args['activation_dropout'], 'architectures': ['BioGptForCausalLM'], 'attention_probs_dropout_prob': args['attention_dropout'], 'bos_token_id': 0, 'eos_token_id': 2, 'hidden_act': args['activation_fn'], 'hidden_dropout_prob': args['dropout'], 'hidden_size': args['decoder_embed_dim'], 'initializer_range': 0.0_2, 'intermediate_size': args['decoder_ffn_embed_dim'], 'layer_norm_eps': 1e-1_2, 'layerdrop': args['decoder_layerdrop'], 'max_position_embeddings': args['max_target_positions'], 'model_type': 'biogpt', 'num_attention_heads': args['decoder_attention_heads'], 'num_hidden_layers': args['decoder_layers'], 'pad_token_id': 1, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_decoder_input_output_embed'], 'vocab_size': src_vocab_size, } # good hparam defaults to start with print(F"""Generating {biogpt_model_config_file}""" ) with 
open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) ) # tokenizer config __a : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : List[Any] = { 'bos_token': '<s>', 'eos_token': '</s>', 'model_max_length': 1_024, 'pad_token': '<pad>', 'special_tokens_map_file': None, 'tokenizer_class': 'BioGptTokenizer', 'unk_token': '<unk>', } print(F"""Generating {biogpt_tokenizer_config_file}""" ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) ) # model __a : Union[str, Any] = chkpt['model'] # remove unneeded keys __a : Tuple = [ 'decoder.version', ] for k in ignore_keys: model_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : List[str] = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('output_projection.weight' ): __a : Optional[int] = model_state_dict.pop(_SCREAMING_SNAKE_CASE ) else: __a : int = model_state_dict.pop(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = BioGptConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : Optional[Any] = BioGptForCausalLM(_SCREAMING_SNAKE_CASE ) # check that it loads ok model_new.load_state_dict(_SCREAMING_SNAKE_CASE ) # save __a : str = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(F"""Generating {pytorch_weights_dump_path}""" ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print('Conversion is done!' ) if __name__ == "__main__": __lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowercase : int = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
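# A hedged usage sketch for the converted dump (an addition, not part of the original
# script); './biogpt_converted' stands in for whatever --pytorch_dump_folder_path was.
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained('./biogpt_converted')
model = BioGptForCausalLM.from_pretrained('./biogpt_converted')
inputs = tokenizer('COVID-19 is', return_tensors='pt')
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))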
'''simple docstring'''

from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = 'umt5'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj='gated-gelu',
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class='T5Tokenizer',
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. '
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == 'gated-gelu':
            self.dense_act_fn = 'gelu_new'

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
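# A short illustration (added here, assuming a transformers release that exposes this
# class as `UMT5Config`) of the `feed_forward_proj` parsing above: 'gated-<act>' sets
# the gating flag, and the 'gated-gelu' shorthand is remapped to the 'gelu_new' kernel.
from transformers import UMT5Config

gated = UMT5Config(feed_forward_proj='gated-gelu')
assert gated.is_gated_act and gated.dense_act_fn == 'gelu_new'
plain = UMT5Config(feed_forward_proj='relu')
assert not plain.is_gated_act and plain.dense_act_fn == 'relu'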
'''simple docstring'''

# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()

joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf'^({joined_dirs}).*?\.py$')

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
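# A quick self-contained check (added for illustration) of the filter built above:
# with sub-dirs ['utils', 'src'], only .py paths under those top-level dirs match.
import re

regex = re.compile(r'^(utils|src).*?\.py$')
assert regex.match('src/transformers/modeling_utils.py')
assert regex.match('utils/check_repo.py')
assert not regex.match('docs/source/index.md')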
'''simple docstring'''

import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = 'https://www.worldometers.info/coronavirus') -> dict:
    """Scrape the worldometers page and return a {statistic name: value} mapping."""
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
        print(f'{key}\n{value}\n')
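# An offline sketch of the same key/value pairing as above, using a static HTML
# snippet instead of a live request (the markup is illustrative, not the real page).
from bs4 import BeautifulSoup

html = "<h1>Coronavirus Cases:</h1><div class='maincounter-number'>704,753,890</div>"
soup = BeautifulSoup(html, 'html.parser')
keys = soup.findAll('h1')
values = soup.findAll('div', {'class': 'maincounter-number'})
print({k.text.strip(): v.text.strip() for k, v in zip(keys, values)})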
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=True , __a=False , __a=False , __a=False , __a=2 , __a=99 , __a=0 , __a=32 , __a=5 , __a=4 , __a=0.1 , __a=0.1 , __a=512 , __a=2 , __a=0.02 , __a=2 , __a=4 , __a="last" , __a=True , __a=None , __a=0 , ): '''simple docstring''' __a : List[str] = parent __a : int = batch_size __a : str = seq_length __a : Optional[int] = is_training __a : Any = use_input_lengths __a : Optional[int] = use_token_type_ids __a : int = use_labels __a : Dict = gelu_activation __a : int = sinusoidal_embeddings __a : str = causal __a : Union[str, Any] = asm __a : Optional[Any] = n_langs __a : Union[str, Any] = vocab_size __a : List[str] = n_special __a : Tuple = hidden_size __a : Any = num_hidden_layers __a : Dict = num_attention_heads __a : Tuple = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Tuple = max_position_embeddings __a : Optional[Any] = type_sequence_label_size __a : List[Any] = initializer_range __a : Tuple = num_labels __a : List[str] = num_choices __a : str = summary_type __a : int = use_proj __a : Dict = scope __a : Optional[int] = bos_token_id def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __a : Tuple = None if self.use_input_lengths: __a : List[str] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __a : Optional[Any] = None if self.use_token_type_ids: __a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __a : Optional[Any] = None __a : Optional[int] = None __a : List[Any] = None if self.use_labels: __a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[Any] = ids_tensor([self.batch_size] , 2 ).float() __a : Any = ids_tensor([self.batch_size] , self.num_choices ) __a : List[str] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __UpperCAmelCase ( self ): '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : Any = XLMModel(config=__a ) model.to(__a ) model.eval() __a : Optional[Any] = model(__a , lengths=__a , langs=__a ) __a : Union[str, Any] = model(__a , langs=__a ) __a : Optional[int] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : Optional[Any] = XLMWithLMHeadModel(__a ) model.to(__a ) model.eval() __a : Optional[Any] = model(__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : List[str] = XLMForQuestionAnsweringSimple(__a ) model.to(__a ) model.eval() __a : List[str] = model(__a ) __a : Optional[int] = model(__a , start_positions=__a , end_positions=__a ) __a : Optional[Any] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : Any = XLMForQuestionAnswering(__a ) model.to(__a ) model.eval() __a : Dict = model(__a ) __a : Optional[Any] = model( __a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , p_mask=__a , ) __a : List[Any] = model( __a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , ) ((__a) , ) : Union[str, Any] = result_with_labels.to_tuple() __a : Union[str, Any] = model(__a , start_positions=__a , end_positions=__a ) ((__a) , ) : Dict = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : List[Any] = XLMForSequenceClassification(__a ) model.to(__a ) model.eval() __a : Optional[int] = model(__a ) __a : Union[str, Any] = model(__a , labels=__a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : List[Any] = self.num_labels __a : Any = XLMForTokenClassification(__a ) model.to(__a ) model.eval() __a : Optional[int] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self 
, __a , __a , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : str = self.num_choices __a : Union[str, Any] = XLMForMultipleChoice(config=__a ) model.to(__a ) model.eval() __a : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : str = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = config_and_inputs __a : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) A_ = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable A_ = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __UpperCAmelCase ( self , __a , __a , __a=False ): '''simple docstring''' __a : Optional[Any] = super()._prepare_for_class(__a , __a , return_labels=__a ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __a : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a ) __a : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a ) return inputs_dict def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = XLMModelTester(self ) __a : Union[str, Any] = ConfigTester(self , config_class=__a , emb_dim=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__a ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a=False , __a=1 ): '''simple docstring''' self.assertIsInstance(__a , __a ) self.assertListEqual( [isinstance(__a , __a ) for iter_attentions in attentions] , [True] * len(__a ) ) self.assertEqual(len(__a ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(__a ): # adds PAD dummy token __a : Optional[Any] = min_length + idx + 1 __a : List[str] = min_length + idx + 1 __a : List[Any] = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__a ) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a=False , __a=1 ): '''simple docstring''' self.assertIsInstance(__a , __a ) self.assertListEqual( [isinstance(__a , __a ) for iter_hidden_states in hidden_states] , [True] * len(__a ) , ) self.assertEqual(len(__a ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(__a ): # adds PAD dummy token __a : List[str] = min_length + idx + 1 __a : Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__a ) , ) pass @slow def __UpperCAmelCase ( self ): '''simple docstring''' 
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = XLMModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(__a ) __a : Union[str, Any] = torch.tensor([[14, 447]] , dtype=torch.long , device=__a ) # the president __a : int = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __a : Optional[Any] = model.generate(__a , do_sample=__a ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __a )
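# A standalone sketch of the generation pattern exercised by the slow integration test
# above (an illustrative addition; it downloads the public checkpoint, so treat it as a
# manual smoke test rather than part of the suite).
import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president", as in the test
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())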
'''simple docstring''' from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , 'embed_dim' ) ) self.parent.assertTrue(hasattr(__a , 'num_heads' ) ) class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ): '''simple docstring''' __a : str = parent __a : List[Any] = batch_size __a : Optional[int] = image_size __a : List[str] = patch_sizes __a : str = patch_stride __a : Any = patch_padding __a : Dict = is_training __a : Union[str, Any] = use_labels __a : Dict = num_labels __a : List[Any] = num_channels __a : Any = embed_dim __a : int = num_heads __a : Optional[int] = stride_kv __a : Dict = depth __a : List[str] = cls_token __a : List[Any] = attention_drop_rate __a : Tuple = initializer_range __a : int = layer_norm_eps def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Dict = None if self.use_labels: # create a random int32 tensor of given shape __a : str = ids_tensor([self.batch_size] , self.num_labels ) __a : str = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = TFCvtModel(config=__a ) __a : Dict = model(__a , training=__a ) __a : Any = (self.image_size, self.image_size) __a , __a : Dict = image_size[0], image_size[1] for i in range(len(self.depth ) ): __a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : List[Any] = self.num_labels __a : Optional[int] = TFCvtForImageClassification(__a ) __a : Dict = model(__a , labels=__a 
, training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : str = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A_ = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtModelTester(self ) __a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' 
, ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(__a ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) __a : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[Any] = [*signature.parameters.keys()] __a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' def check_hidden_states_output(__a , __a , __a ): __a : List[str] = model_class(__a ) __a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) ) __a : Any = outputs.hidden_states __a : Union[str, Any] = len(self.model_tester.depth ) self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[str] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Optional[Any] = True check_hidden_states_output(__a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = TFCvtModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __a : Tuple = self.default_image_processor __a : Any = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ) # forward pass __a : Any = model(**__a ) # verify the logits __a : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
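# A self-contained variant (added for illustration) of the integration check above,
# fed a random tensor instead of the COCO fixture so it needs no local test files.
import tensorflow as tf
from transformers import TFCvtForImageClassification

model = TFCvtForImageClassification.from_pretrained('microsoft/cvt-13')
pixel_values = tf.random.uniform((1, 3, 224, 224))  # channels-first, as the model expects
logits = model(pixel_values).logits
assert logits.shape == (1, 1000)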
'''simple docstring'''

import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires it; never called here
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--longformer_model',
        default=None,
        type=str,
        required=True,
        help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
    )
    parser.add_argument(
        '--longformer_question_answering_ckpt_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch Lightning Checkpoint.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
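# A hedged inference sketch for the converted model (an addition; the dump folder path
# is a placeholder for whatever --pytorch_dump_folder_path was used above).
import torch
from transformers import LongformerForQuestionAnswering, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
model = LongformerForQuestionAnswering.from_pretrained('./longformer_qa_converted')
inputs = tokenizer('Who wrote it?', 'It was written by Ada.', return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax())
print(tokenizer.decode(inputs['input_ids'][0][start : end + 1]))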
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __lowercase : Union[str, Any] = logging.get_logger(__name__) __lowercase : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __lowercase : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): for attribute in key.split('.' ): __a : Any = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if weight_type is not None: __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape else: __a : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Tuple = value elif weight_type == "weight_g": __a : str = value elif weight_type == "weight_v": __a : Optional[Any] = value elif weight_type == "bias": __a : Union[str, Any] = value else: __a : List[Any] = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : int = [] __a : List[str] = fairseq_model.state_dict() __a : Tuple = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __a : int = None for name, value in fairseq_dict.items(): __a : List[str] = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) __a : List[str] = True elif name.split('.' )[0] == "proj": __a : Tuple = fairseq_model.proj __a : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __a : List[Any] = True if "*" in mapped_key: __a : str = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] __a : int = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: __a : List[Any] = 'weight_g' elif "weight_v" in name: __a : List[Any] = 'weight_v' elif "bias" in name: __a : Optional[Any] = 'bias' elif "weight" in name: __a : Tuple = 'weight' else: __a : Optional[Any] = None set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) return proj_weight def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : List[str] = full_name.split('conv_layers.' )[-1] __a : Any = name.split('.' ) __a : List[str] = int(items[0] ) __a : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __a : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a , __a : List[str] = emb.weight.shape __a : str = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) __a : Optional[int] = emb.weight.data return lin_layer def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: __a : Union[str, Any] = f.readlines() __a : Tuple = [line.split(' ' )[0] for line in lines] __a : int = len(_SCREAMING_SNAKE_CASE ) __a : List[Any] = { '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, } vocab_dict.update(dict(zip(_SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , ): __a : Optional[int] = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : Any = SpeechaTextaConfig.from_pretrained( _SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE , decoder_layers=_SCREAMING_SNAKE_CASE , do_stable_layer_norm=_SCREAMING_SNAKE_CASE ) __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) __a , __a , __a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __a : Optional[int] = model[0].eval() # set weights for wav2vec2 encoder __a : Tuple = WavaVecaModel(_SCREAMING_SNAKE_CASE ) __a : int = recursively_load_weights_wavaveca(model.encoder , _SCREAMING_SNAKE_CASE ) __a : Dict = SpeechaTextaForCausalLM(_SCREAMING_SNAKE_CASE ) __a , __a : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_SCREAMING_SNAKE_CASE ) # set output linear layer unexpected_keys.remove('embed_out' ) __a : Optional[Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) __a : Tuple = SpeechEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE ) __a : int = False # add projection layer __a : str = nn.Parameter(projection_layer.weight ) __a : Any = nn.Parameter(projection_layer.bias ) __a : str = create_vocab_dict(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = SpeechaTextaTokenizer(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = hf_wavavec.config.to_dict() __a 
: Tuple = tokenizer.pad_token_id __a : Optional[int] = tokenizer.bos_token_id __a : Union[str, Any] = tokenizer.eos_token_id __a : Tuple = 'speech_to_text_2' __a : Tuple = 'wav2vec2' __a : List[str] = SpeechEncoderDecoderConfig.from_dict(_SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __lowercase : Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
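# A hedged shape-level smoke test for the assembled encoder-decoder above (an addition;
# the folder path is a placeholder and the audio is random noise, so only the plumbing
# is being exercised, not transcription quality).
import numpy as np
from transformers import SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

model = SpeechEncoderDecoderModel.from_pretrained('./wav2vec2_s2t2_converted')
extractor = Wav2Vec2FeatureExtractor.from_pretrained('./wav2vec2_s2t2_converted')
audio = np.random.randn(16_000).astype('float32')  # one second of fake 16 kHz audio
inputs = extractor(audio, sampling_rate=16_000, return_tensors='pt')
generated_ids = model.generate(inputs['input_values'], max_new_tokens=10)
print(generated_ids.shape)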
'''simple docstring''' import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __lowercase : Optional[int] = 'base_with_context' def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict ): __a : List[str] = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) __a : Optional[Any] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_SCREAMING_SNAKE_CASE ) for lyr_num, lyr in enumerate(model.encoders ): __a : List[Any] = weights[F"""layers_{lyr_num}"""] __a : Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) __a : Dict = ly_weight['attention'] __a : str = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) __a : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) __a : Dict = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) __a : int = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) __a : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) __a : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) __a : Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) __a : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) __a : Optional[int] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : List[Any] = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) __a : Dict = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_SCREAMING_SNAKE_CASE ) for lyr_num, lyr in enumerate(model.encoders ): __a : str = weights[F"""layers_{lyr_num}"""] __a : Optional[int] = ly_weight['attention'] __a : int = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) __a : str = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) __a : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) __a : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) __a : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) __a : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) __a : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) __a : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) __a : Any = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) __a : Any = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] ): __a : Dict = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) __a : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) __a : str = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , 
requires_grad=_SCREAMING_SNAKE_CASE ) __a : Optional[Any] = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): __a : Optional[int] = weights[F"""layers_{lyr_num}"""] __a : List[str] = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) __a : Optional[int] = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) __a : Optional[Any] = ly_weight['self_attention'] __a : str = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) __a : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) __a : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) __a : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) __a : Optional[Any] = ly_weight['MultiHeadDotProductAttention_0'] __a : int = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) __a : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) __a : Dict = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) __a : Dict = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) __a : Optional[int] = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) __a : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) __a : int = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) __a : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) __a : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) __a : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) __a : Tuple = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) __a : int = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): __a : Optional[int] = checkpoints.load_tax_checkpoint(args.checkpoint_path ) __a : Union[str, Any] = jnp.tree_util.tree_map(onp.array , _SCREAMING_SNAKE_CASE ) __a : Any = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] __a : Optional[int] = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) __a : Any = inference.parse_training_gin_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : str = inference.InferenceModel(args.checkpoint_path , _SCREAMING_SNAKE_CASE ) __a : Any = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) __a : str = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) __a : Any = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) __a : Union[str, Any] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) __a : int = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , _SCREAMING_SNAKE_CASE ) __a : Optional[int] = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , _SCREAMING_SNAKE_CASE ) __a : List[str] = load_decoder(ta_checkpoint['target']['decoder'] , _SCREAMING_SNAKE_CASE ) __a : Optional[int] = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) __a : Optional[int] = SpectrogramDiffusionPipeline( notes_encoder=_SCREAMING_SNAKE_CASE , continuous_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , melgan=_SCREAMING_SNAKE_CASE , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument( '--checkpoint_path', default=f'''{MODEL}/checkpoint_500000''', type=str, required=False, help='Path to the original jax model checkpoint.', ) __lowercase : int = parser.parse_args() main(args)
27
'''simple docstring''' # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int ): with open(_SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*_SCREAMING_SNAKE_CASE ) finally: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __lowercase : Dict = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) __lowercase : Tuple = torch.device('cuda', local_rank) __lowercase : Optional[int] = socket.gethostname() __lowercase : List[str] = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __lowercase : str = dist.get_rank() __lowercase : Union[str, Any] = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
27
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , 'embed_dim' ) ) self.parent.assertTrue(hasattr(__a , 'num_heads' ) ) class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ): '''simple docstring''' __a : str = parent __a : List[Any] = batch_size __a : Optional[int] = image_size __a : List[str] = patch_sizes __a : str = patch_stride __a : Any = patch_padding __a : Dict = is_training __a : Union[str, Any] = use_labels __a : Dict = num_labels __a : List[Any] = num_channels __a : Any = embed_dim __a : int = num_heads __a : Optional[int] = stride_kv __a : Dict = depth __a : List[str] = cls_token __a : List[Any] = attention_drop_rate __a : Tuple = initializer_range __a : int = layer_norm_eps def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Dict = None if self.use_labels: # create a random int32 tensor of given shape __a : str = ids_tensor([self.batch_size] , self.num_labels ) __a : str = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = TFCvtModel(config=__a ) __a : Dict = model(__a , training=__a ) __a : Any = (self.image_size, self.image_size) __a , __a : Dict = image_size[0], image_size[1] for i in range(len(self.depth ) ): __a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : List[Any] = self.num_labels __a : Optional[int] = TFCvtForImageClassification(__a ) __a : Dict = model(__a , labels=__a 
, training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : str = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A_ = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtModelTester(self ) __a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' 
, ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(__a ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) __a : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[Any] = [*signature.parameters.keys()] __a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' def check_hidden_states_output(__a , __a , __a ): __a : List[str] = model_class(__a ) __a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) ) __a : Any = outputs.hidden_states __a : Union[str, Any] = len(self.model_tester.depth ) self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[str] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Optional[Any] = True check_hidden_states_output(__a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = TFCvtModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __a : Tuple = self.default_image_processor __a : Any = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ) # forward pass __a : Any = model(**__a ) # verify the logits __a : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase : str = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] __lowercase : Tuple = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] __lowercase : Dict = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): __lowercase : Optional[Any] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
1
'''simple docstring''' from typing import Any class __UpperCamelCase : def __init__( self , __a ): '''simple docstring''' __a : Optional[int] = data __a : List[Any] = None def __repr__( self ): '''simple docstring''' return f"""Node({self.data})""" class __UpperCamelCase : def __init__( self ): '''simple docstring''' __a : Optional[Any] = None def __iter__( self ): '''simple docstring''' __a : List[str] = self.head while node: yield node.data __a : Union[str, Any] = node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(__a ) for item in self] ) def __getitem__( self , __a ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , __a , __a ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) __a : Any = self.head for _ in range(__a ): __a : int = current.next __a : str = data def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.insert_nth(len(self ) , __a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.insert_nth(0 , __a ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) __a : List[Any] = Node(__a ) if self.head is None: __a : Optional[Any] = new_node elif index == 0: __a : Optional[Any] = self.head # link new_node to head __a : Union[str, Any] = new_node else: __a : Any = self.head for _ in range(index - 1 ): __a : Optional[int] = temp.next __a : List[Any] = temp.next __a : List[str] = new_node def __UpperCAmelCase ( self ): # print every node data '''simple docstring''' print(self ) def __UpperCAmelCase ( self ): '''simple docstring''' return self.delete_nth(0 ) def __UpperCAmelCase ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def __UpperCAmelCase ( self , __a = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) __a : Optional[int] = self.head # default first node if index == 0: __a : Optional[Any] = self.head.next else: __a : int = self.head for _ in range(index - 1 ): __a : Any = temp.next __a : Any = temp.next __a : Any = temp.next.next return delete_node.data def __UpperCAmelCase ( self ): '''simple docstring''' return self.head is None def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = None __a : Dict = self.head while current: # Store the current node's next node. __a : Optional[int] = current.next # Make the current node's next point backwards __a : Any = prev # Make the previous node be the current node __a : str = current # Make the current node the next node (to progress iteration) __a : Dict = next_node # Return prev in order to put the head at the end __a : Tuple = prev def lowerCamelCase (): __a : Tuple = LinkedList() assert linked_list.is_empty() is True assert str(_SCREAMING_SNAKE_CASE ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(_SCREAMING_SNAKE_CASE ) == i linked_list.insert_nth(_SCREAMING_SNAKE_CASE , i + 1 ) assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_SCREAMING_SNAKE_CASE ) == 9 assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __a : Dict = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) ) def lowerCamelCase (): __a : Tuple = [ -9, 100, Node(77_345_112 ), 'dlrow olleH', 7, 5_555, 0, -1_9_2.5_5_5_5_5, 'Hello, world!', 7_7.9, Node(10 ), None, None, 1_2.2_0, ] __a : Any = LinkedList() for i in test_input: linked_list.insert_tail(_SCREAMING_SNAKE_CASE ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __a : Union[str, Any] = linked_list.delete_head() assert result == -9 assert ( str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __a : Dict = linked_list.delete_tail() assert result == 1_2.2 assert ( str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __a : Union[str, Any] = linked_list.delete_nth(10 ) assert result is None assert ( str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(_SCREAMING_SNAKE_CASE ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_SCREAMING_SNAKE_CASE ) assert ( str(_SCREAMING_SNAKE_CASE ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_SCREAMING_SNAKE_CASE ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowerCamelCase (): from doctest import testmod testmod() __a : List[str] = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(_SCREAMING_SNAKE_CASE ) print('\nReading/changing Node data using indexing:' ) print(F"""Element at Position 1: {linked_list[1]}""" ) __a : Union[str, Any] = input('Enter New Value: ' ).strip() print('New list:' ) print(_SCREAMING_SNAKE_CASE ) print(F"""length of linked_list is : {len(_SCREAMING_SNAKE_CASE )}""" ) if __name__ == "__main__": main()
27
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowercase : Tuple = pytest.mark.integration __lowercase : Optional[int] = {'comet'} __lowercase : List[str] = importlib.util.find_spec('fairseq') is not None __lowercase : str = {'code_eval'} __lowercase : List[Any] = os.name == 'nt' __lowercase : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'} __lowercase : Optional[Any] = importlib.util.find_spec('transformers') is not None def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : int , _SCREAMING_SNAKE_CASE : List[Any] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (): __a : List[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @local class __UpperCamelCase ( parameterized.TestCase ): A_ = {} A_ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = '[...]' __a : Tuple = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) __a : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__a ) # check parameters __a : Dict = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__a , metric_module.__name__ ): with self.use_local_metrics(): try: __a : str = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = '[...]' __a : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) # run doctest with self.use_local_metrics(): __a : List[Any] = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) 
self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ): yield else: yield @contextmanager def __UpperCAmelCase ( self ): '''simple docstring''' def load_local_metric(__a , *__a , **__a ): return load_metric(os.path.join('metrics' , __a ) , *__a , **__a ) with patch('datasets.load_metric' ) as mock_load_metric: __a : Dict = load_local_metric yield @classmethod def __UpperCAmelCase ( cls , __a ): '''simple docstring''' def wrapper(__a ): __a : Optional[Any] = contextmanager(__a ) __a : str = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: __a : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): import torch def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Optional[int] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: __a : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Optional[int] ): class __UpperCamelCase : def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' assert len(__a ) == 2 __a : Dict = [0.19, 0.92] return scores, sum(__a ) / len(__a ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: __a : str = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: __a : int = load_from_checkpoint yield def lowerCamelCase (): __a : Optional[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) ) __a : List[str] = 'ERROR' __a : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
27
1
'''simple docstring'''

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowercase : Tuple = {
    'configuration_informer': [
        'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : int = [
        'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InformerForPrediction',
        'InformerModel',
        'InformerPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    __lowercase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
'''simple docstring''' import re import string import numpy as np import datasets __lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __lowercase : Any = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def 
__UpperCAmelCase ( self , __a , __a , __a=None , __a=False , __a=False , __a=False , ): '''simple docstring''' if regexes_to_ignore is not None: for s in regexes_to_ignore: __a : Tuple = np.array([re.sub(__a , '' , __a ) for x in predictions] ) __a : List[Any] = np.array([re.sub(__a , '' , __a ) for x in references] ) else: __a : int = np.asarray(__a ) __a : str = np.asarray(__a ) if ignore_case: __a : Dict = np.char.lower(__a ) __a : List[str] = np.char.lower(__a ) if ignore_punctuation: __a : Dict = string.punctuation.maketrans('' , '' , string.punctuation ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Dict = np.char.translate(__a , table=__a ) if ignore_numbers: __a : Optional[int] = string.digits.maketrans('' , '' , string.digits ) __a : Tuple = np.char.translate(__a , table=__a ) __a : Optional[int] = np.char.translate(__a , table=__a ) __a : Any = predictions == references return {"exact_match": np.mean(__a ) * 100}
27
1
'''simple docstring'''

from math import ceil


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 1_001 ):
    __a : Union[str, Any] = 1

    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        __a : Dict = 2 * i + 1
        __a : Tuple = 2 * i
        __a : Union[str, Any] = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            __lowercase : int = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number')
27
'''simple docstring'''

import os
import sys


__lowercase : List[Any] = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


__lowercase : int = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]


@add_start_docstrings(AutoConfig.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[Any] ):
    return AutoConfig.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )


@add_start_docstrings(AutoTokenizer.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any ):
    return AutoTokenizer.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )


@add_start_docstrings(AutoModel.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Union[str, Any] ):
    return AutoModel.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[int] ):
    return AutoModelForCausalLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[Any] ):
    return AutoModelForMaskedLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Any ):
    return AutoModelForSequenceClassification.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : List[str] ):
    return AutoModelForQuestionAnswering.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
27
1
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): __a : Optional[Any] = tmp_path / 'file.csv' __a : Union[str, Any] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : str = tmp_path / 'malformed_file.csv' __a : int = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = tmp_path / 'csv_with_image.csv' __a : Dict = textwrap.dedent( F"""\ image {image_file} """ ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Union[str, Any] = tmp_path / 'csv_with_label.csv' __a : Any = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): __a : Dict = tmp_path / 'csv_with_int_list.csv' __a : Tuple = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): __a : int = Csv() __a : str = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(_SCREAMING_SNAKE_CASE , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(_SCREAMING_SNAKE_CASE ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Tuple = f.read().splitlines()[1] __a : Tuple = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) __a : Any = csv._generate_tables([[csv_file_with_image]] ) __a : int = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() __a : Any = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Tuple = f.read().splitlines()[1:] __a : Optional[int] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) __a : List[str] = csv._generate_tables([[csv_file_with_label]] ) __a : Dict = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() __a : int = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(_SCREAMING_SNAKE_CASE ) for label in labels] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): __a : str = 
Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda _SCREAMING_SNAKE_CASE : [int(_SCREAMING_SNAKE_CASE ) for i in x.split()]} ) __a : Any = csv._generate_tables([[csv_file_with_int_list]] ) __a : Any = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) __a : Tuple = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
27
'''simple docstring'''

import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : List[Any] = inspect.getfile(accelerate.test_utils )
        __a : List[str] = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        __a : Union[str, Any] = test_metrics

    @require_cpu
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main , num_processes=1 )

    @require_cpu
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main )

    @require_single_gpu
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        self.test_metrics.main()

    @require_multi_gpu
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        __a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(__a , env=os.environ.copy() )
27
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase : str = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] __lowercase : Tuple = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] __lowercase : Dict = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): __lowercase : Optional[Any] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): __a : Optional[Any] = tmp_path / 'file.csv' __a : Union[str, Any] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : str = tmp_path / 'malformed_file.csv' __a : int = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = tmp_path / 'csv_with_image.csv' __a : Dict = textwrap.dedent( F"""\ image {image_file} """ ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Union[str, Any] = tmp_path / 'csv_with_label.csv' __a : Any = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): __a : Dict = tmp_path / 'csv_with_int_list.csv' __a : Tuple = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): __a : int = Csv() __a : str = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(_SCREAMING_SNAKE_CASE , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(_SCREAMING_SNAKE_CASE ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Tuple = f.read().splitlines()[1] __a : Tuple = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) __a : Any = csv._generate_tables([[csv_file_with_image]] ) __a : int = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() __a : Any = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Tuple = f.read().splitlines()[1:] __a : Optional[int] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) __a : List[str] = csv._generate_tables([[csv_file_with_label]] ) __a : Dict = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() __a : int = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(_SCREAMING_SNAKE_CASE ) for label in labels] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): __a : str = 
Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda _SCREAMING_SNAKE_CASE : [int(_SCREAMING_SNAKE_CASE ) for i in x.split()]} ) __a : Any = csv._generate_tables([[csv_file_with_int_list]] ) __a : Any = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) __a : Tuple = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
27
1
'''simple docstring'''

from ...processing_utils import ProcessorMixin


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "WhisperFeatureExtractor"
    A_ = "WhisperTokenizer"

    def __init__( self , __a , __a ):
        '''simple docstring'''
        super().__init__(__a , __a )
        __a : List[str] = self.feature_extractor
        __a : Optional[int] = False

    def __UpperCAmelCase ( self , __a=None , __a=None , __a=True ):
        '''simple docstring'''
        return self.tokenizer.get_decoder_prompt_ids(task=__a , language=__a , no_timestamps=__a )

    def __call__( self , *__a , **__a ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*__a , **__a )

        __a : Union[str, Any] = kwargs.pop('audio' , __a )
        __a : Tuple = kwargs.pop('sampling_rate' , __a )
        __a : str = kwargs.pop('text' , __a )
        if len(__a ) > 0:
            __a : Optional[Any] = args[0]
            __a : List[Any] = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )

        if audio is not None:
            __a : List[Any] = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a )
        if text is not None:
            __a : Any = self.tokenizer(__a , **__a )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            __a : Tuple = encodings['input_ids']
            return inputs

    def __UpperCAmelCase ( self , *__a , **__a ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*__a , **__a )

    def __UpperCAmelCase ( self , *__a , **__a ):
        '''simple docstring'''
        return self.tokenizer.decode(*__a , **__a )

    def __UpperCAmelCase ( self , __a , __a="np" ):
        '''simple docstring'''
        return self.tokenizer.get_prompt_ids(__a , return_tensors=__a )
27
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


__lowercase : Union[str, Any] = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : List[Any] = ['BlenderbotTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : List[Any] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : Union[str, Any] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : Dict = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    __lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
1
'''simple docstring'''

import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    WavaVecaFeatureExtractor,
    logging,
)


logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
    __a : Any = UniSpeechSatForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
    __a : Union[str, Any] = downstream_dict['projector.weight']
    __a : Dict = downstream_dict['projector.bias']
    __a : int = downstream_dict['model.post_net.linear.weight']
    __a : List[str] = downstream_dict['model.post_net.linear.bias']
    return model


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str ):
    __a : Tuple = UniSpeechSatForAudioFrameClassification.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
    __a : Tuple = downstream_dict['model.linear.weight']
    __a : str = downstream_dict['model.linear.bias']
    return model


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple ):
    __a : Union[str, Any] = UniSpeechSatForXVector.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
    __a : List[Any] = downstream_dict['connector.weight']
    __a : Union[str, Any] = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        __a : List[Any] = downstream_dict[
            F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        __a : str = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]

    __a : Optional[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    __a : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    __a : Optional[int] = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    __a : Union[str, Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    __a : List[str] = downstream_dict['objective.W']
    return model


@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
    __a : Tuple = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
    __a : List[Any] = checkpoint['Downstream']

    __a : Any = UniSpeechSatConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
    __a : Tuple = WavaVecaFeatureExtractor.from_pretrained(
        _SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE )

    __a : str = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification' ):
        __a : Any = convert_classification(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    elif arch.endswith('ForAudioFrameClassification' ):
        __a : Union[str, Any] = convert_diarization(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    elif arch.endswith('ForXVector' ):
        __a : List[Any] = convert_xvector(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )

    if hf_config.use_weighted_layer_sum:
        __a : int = checkpoint['Featurizer']['weights']

    hf_feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
    hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    __lowercase : int = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    __lowercase : Optional[int] = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
27
'''simple docstring'''
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMvaConfig,
        TFLayoutLMvaForQuestionAnswering,
        TFLayoutLMvaForSequenceClassification,
        TFLayoutLMvaForTokenClassification,
        TFLayoutLMvaModel,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class __UpperCamelCase :
    def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ):
        '''simple docstring'''
        __a : Optional[Any] = parent
        __a : int = batch_size
        __a : Any = num_channels
        __a : Optional[int] = image_size
        __a : Dict = patch_size
        __a : int = is_training
        __a : Union[str, Any] = use_input_mask
        __a : Optional[int] = use_token_type_ids
        __a : Dict = use_labels
        __a : str = vocab_size
        __a : List[Any] = hidden_size
        __a : Union[str, Any] = num_hidden_layers
        __a : str = num_attention_heads
        __a : Union[str, Any] = intermediate_size
        __a : Any = hidden_act
        __a : List[str] = hidden_dropout_prob
        __a : List[str] = attention_probs_dropout_prob
        __a : List[Any] = max_position_embeddings
        __a : Tuple = type_vocab_size
        __a : Any = type_sequence_label_size
        __a : Optional[int] = initializer_range
        __a : Any = coordinate_size
        __a : List[Any] = shape_size
        __a : Optional[int] = num_labels
        __a : Dict = num_choices
        __a : Union[str, Any] = scope
        __a : Union[str, Any] = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __a : Optional[int] = text_seq_length
        __a : Any = (image_size // patch_size) ** 2 + 1
        __a : Dict = self.text_seq_length + self.image_seq_length

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )

        __a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __a : Any = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __a : List[Any] = bbox[i, j, 3]
                    __a : Tuple = bbox[i, j, 1]
                    __a : str = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __a : int = bbox[i, j, 2]
                    __a : Dict = bbox[i, j, 0]
                    __a : int = tmp_coordinate
        __a : Optional[int] = tf.constant(__a )

        __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __a : str = None
        if self.use_input_mask:
            __a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )

        __a : str = None
        if self.use_token_type_ids:
            __a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )

        __a : Optional[Any] = None
        __a : Optional[int] = None
        if self.use_labels:
            __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )

        __a : int = LayoutLMvaConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            initializer_range=self.initializer_range ,
            coordinate_size=self.coordinate_size ,
            shape_size=self.shape_size ,
            input_size=self.image_size ,
            patch_size=self.patch_size ,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        __a : Dict = TFLayoutLMvaModel(config=__a )

        # text + image
        __a : List[Any] = model(__a , pixel_values=__a , training=__a )
        __a : Any = model(
            __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , )
        __a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        __a : Any = model(__a , training=__a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        __a : str = model({'pixel_values': pixel_values} , training=__a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        __a : Any = self.num_labels
        __a : Dict = TFLayoutLMvaForSequenceClassification(config=__a )

        __a : List[str] = model(
            __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )

        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        __a : str = self.num_labels
        __a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a )

        __a : List[str] = model(
            __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )

        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        __a : List[Any] = 2
        __a : Any = TFLayoutLMvaForQuestionAnswering(config=__a )

        __a : Any = model(
            __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , )

        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Dict = self.prepare_config_and_inputs()
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs
        __a : Any = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict


@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    A_ = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    A_ = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    A_ = False
    A_ = False
    A_ = False

    def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
        '''simple docstring'''
        return True

    def __UpperCAmelCase ( self , __a , __a , __a=False ):
        '''simple docstring'''
        __a : str = copy.deepcopy(__a )

        if model_class in get_values(__a ):
            __a : str = {
                k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(__a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(__a ):
                __a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__a ):
                __a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__a ):
                __a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__a ):
                __a : Union[str, Any] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )

        return inputs_dict

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Optional[Any] = TFLayoutLMvaModelTester(self )
        __a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __a : Dict = model_class(__a )
            if getattr(__a , 'hf_compute_loss' , __a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
                __a : str = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0]
                ]
                __a : Dict = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
                __a : Dict = prepared_for_class.pop('input_ids' )

                __a : Tuple = model(__a , **__a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

                # Test that model correctly compute the loss when we mask some positions
                __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
                __a : str = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __a : Union[str, Any] = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        __a : List[Any] = -100
                        __a : List[str] = tf.convert_to_tensor(__a )
                        __a : Any = model(__a , **__a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )

                # Test that model correctly compute the loss with a dict
                __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
                __a : str = model(__a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

                # Test that model correctly compute the loss with a tuple
                __a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )

                # Get keys that were added with the _prepare_for_class function
                __a : Dict = prepared_for_class.keys() - inputs_dict.keys()
                __a : Any = inspect.signature(model.call ).parameters
                __a : str = list(signature.keys() )

                # Create a dictionary holding the location of the tensors in the tuple
                __a : List[Any] = {0: 'input_ids'}
                for label_key in label_keys:
                    __a : List[Any] = signature_names.index(__a )
                    __a : Union[str, Any] = label_key
                __a : List[str] = sorted(tuple_index_mapping.items() )

                # Initialize a list with their default values, update the values and convert to a tuple
                __a : Union[str, Any] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __a : Optional[Any] = prepared_for_class[value]
                __a : str = tuple(__a )

                # Send to model
                __a : Tuple = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __a : Any = type
            self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            __a , __a , __a , __a , __a , __a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            __a , __a , __a , __a , __a , __a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            __a , __a , __a , __a , __a , __a , __a )

    @slow
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a )
            self.assertIsNotNone(__a )


def lowerCamelCase ():
    __a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    @cached_property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None

    @slow
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )

        __a : Tuple = self.default_image_processor
        __a : List[Any] = prepare_img()
        __a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values

        __a : Union[str, Any] = tf.constant([[1, 2]] )
        __a : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )

        # forward pass
        __a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a )

        # verify the logits
        __a : List[Any] = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , __a )

        __a : Optional[Any] = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
27
1
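# A minimal NumPy sketch of the bbox "legalization" the LayoutLMv3 tester above
# performs (swap coordinates so that x0 <= x1 and y0 <= y1 for every box).
# `legalize_bboxes` is a hypothetical helper name, not part of the file above.
import numpy as np


def legalize_bboxes(bbox: np.ndarray) -> np.ndarray:
    """bbox has shape (batch, seq_len, 4) with columns (x0, y0, x1, y1)."""
    fixed = bbox.copy()
    # Sort each coordinate pair so the smaller value comes first.
    fixed[..., [0, 2]] = np.sort(fixed[..., [0, 2]], axis=-1)
    fixed[..., [1, 3]] = np.sort(fixed[..., [1, 3]], axis=-1)
    return fixed


boxes = np.random.randint(0, 1000, size=(2, 7, 4))
legal = legalize_bboxes(boxes)
assert (legal[..., 0] <= legal[..., 2]).all() and (legal[..., 1] <= legal[..., 3]).all()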
'''simple docstring'''
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__lowercase : int = logging.get_logger(__name__)

__lowercase : List[str] = {'vocab_file': 'vocab.json'}

__lowercase : List[Any] = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

__lowercase : Tuple = {'mgp-str': 27}


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = VOCAB_FILES_NAMES
    A_ = PRETRAINED_VOCAB_FILES_MAP
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , __a , __a="[GO]" , __a="[GO]" , __a="[s]" , __a="[GO]" , **__a ):
        '''simple docstring'''
        super().__init__(
            unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a , )

        with open(__a , encoding='utf-8' ) as vocab_handle:
            __a : Dict = json.load(__a )
        __a : Union[str, Any] = {v: k for k, v in self.vocab.items()}

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return len(self.vocab )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return dict(self.vocab , **self.added_tokens_encoder )

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        __a : str = []
        for s in text:
            char_tokens.extend(__a )
        return char_tokens

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        return self.vocab.get(__a , self.vocab.get(self.unk_token ) )

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        return self.decoder.get(__a )

    def __UpperCAmelCase ( self , __a , __a = None ):
        '''simple docstring'''
        if not os.path.isdir(__a ):
            logger.error('Vocabulary path ({}) should be a directory'.format(__a ) )
            return
        __a : Any = os.path.join(
            __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        with open(__a , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=__a , ensure_ascii=__a ) + '\n' )

        return (vocab_file,)
27
'''simple docstring'''
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
    __a : Any = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            F"""{test_file} instead.""" )
    __a : Tuple = components[-1]
    if not test_fn.endswith('py' ):
        raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith('test_modeling_' ):
        raise ValueError(
            F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )

    __a : List[str] = components[:-1] + [test_fn.replace('.py' , '' )]
    __a : Optional[Any] = '.'.join(_SCREAMING_SNAKE_CASE )

    return test_module_path


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : List[str] = get_module_path(_SCREAMING_SNAKE_CASE )
    __a : Dict = importlib.import_module(_SCREAMING_SNAKE_CASE )

    return test_module


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : List[str] = []
    __a : List[str] = get_test_module(_SCREAMING_SNAKE_CASE )
    for attr in dir(_SCREAMING_SNAKE_CASE ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
    __a : Any = []
    __a : str = get_test_module(_SCREAMING_SNAKE_CASE )
    for attr in dir(_SCREAMING_SNAKE_CASE ):
        __a : int = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'all_model_classes' , [] )
        if len(_SCREAMING_SNAKE_CASE ) > 0:
            test_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : Any = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
    __a : Tuple = test_class()
    if hasattr(_SCREAMING_SNAKE_CASE , 'setUp' ):
        test.setUp()

    __a : List[Any] = None
    if hasattr(_SCREAMING_SNAKE_CASE , 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            __a : List[str] = test.model_tester.__class__

    return model_tester


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : int = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] ):
    __a : List[Any] = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    __a : Any = []
    for test_class in test_classes:
        __a : Any = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE )
        if tester_class is not None:
            tester_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
    __a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
    __a : int = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes}
    return test_tester_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
    __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE )
    __a : Optional[int] = {
        model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
    }
    return model_test_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
    __a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE )
    __a : str = {
        model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
    }
    return model_to_tester_mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return o
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return o.__name__
    elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
        return [to_json(_SCREAMING_SNAKE_CASE ) for x in o]
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()}
    else:
        return o
27
1
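# A hedged usage sketch for the test-introspection helpers above. The mangled copy
# names every function `lowerCamelCase`; the names imported here (get_test_classes,
# get_model_tester_from_test_class) are the ones its own call sites use internally,
# and the module/path layout is an assumption, not guaranteed.
import sys

sys.path.append("utils")  # assumption: run from a transformers checkout root
from get_test_info import get_model_tester_from_test_class, get_test_classes

for test_class in get_test_classes("tests/models/bert/test_modeling_bert.py"):
    print(test_class.__name__, "->", get_model_tester_from_test_class(test_class))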
'''simple docstring'''
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any]=1 ):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('.' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('.' )[:n_shave_prefix_segments] )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0 ):
    __a : int = []
    for old_item in old_list:
        __a : Optional[int] = old_item.replace('in_layers.0' , 'norm1' )
        __a : Tuple = new_item.replace('in_layers.2' , 'conv1' )

        __a : Any = new_item.replace('out_layers.0' , 'norm2' )
        __a : Optional[int] = new_item.replace('out_layers.3' , 'conv2' )

        __a : int = new_item.replace('emb_layers.1' , 'time_emb_proj' )
        __a : Dict = new_item.replace('skip_connection' , 'conv_shortcut' )

        __a : Tuple = shave_segments(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=_SCREAMING_SNAKE_CASE )

        mapping.append({'old': old_item, 'new': new_item} )

    return mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=0 ):
    __a : Union[str, Any] = []
    for old_item in old_list:
        __a : Any = old_item

        __a : int = new_item.replace('norm.weight' , 'group_norm.weight' )
        __a : Union[str, Any] = new_item.replace('norm.bias' , 'group_norm.bias' )

        __a : Tuple = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
        __a : List[str] = new_item.replace('proj_out.bias' , 'proj_attn.bias' )

        __a : int = shave_segments(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=_SCREAMING_SNAKE_CASE )

        mapping.append({'old': old_item, 'new': new_item} )

    return mapping


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Dict=None ):
    assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            __a : int = old_checkpoint[path]
            __a : Any = old_tensor.shape[0] // 3

            __a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)

            __a : Dict = old_tensor.shape[0] // config['num_head_channels'] // 3

            __a : Dict = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            __a , __a , __a : Dict = old_tensor.split(channels // num_heads , dim=1 )

            __a : Optional[int] = query.reshape(_SCREAMING_SNAKE_CASE )
            __a : Optional[Any] = key.reshape(_SCREAMING_SNAKE_CASE )
            __a : Union[str, Any] = value.reshape(_SCREAMING_SNAKE_CASE )

    for path in paths:
        __a : Optional[int] = path['new']

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        __a : List[str] = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
        __a : Any = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
        __a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )

        if additional_replacements is not None:
            for replacement in additional_replacements:
                __a : Optional[int] = new_path.replace(replacement['old'] , replacement['new'] )

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            __a : str = old_checkpoint[path['old']][:, :, 0]
        else:
            __a : Optional[Any] = old_checkpoint[path['old']]


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    __a : Optional[Any] = {}

    __a : Dict = checkpoint['time_embed.0.weight']
    __a : Any = checkpoint['time_embed.0.bias']
    __a : Any = checkpoint['time_embed.2.weight']
    __a : Optional[Any] = checkpoint['time_embed.2.bias']

    __a : int = checkpoint['input_blocks.0.0.weight']
    __a : Union[str, Any] = checkpoint['input_blocks.0.0.bias']

    __a : List[str] = checkpoint['out.0.weight']
    __a : int = checkpoint['out.0.bias']
    __a : List[Any] = checkpoint['out.2.weight']
    __a : Optional[int] = checkpoint['out.2.bias']

    # Retrieves the keys for the input blocks only
    __a : int = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
    __a : Any = {
        layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
        for layer_id in range(_SCREAMING_SNAKE_CASE )
    }

    # Retrieves the keys for the middle blocks only
    __a : str = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
    __a : Optional[int] = {
        layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
        for layer_id in range(_SCREAMING_SNAKE_CASE )
    }

    # Retrieves the keys for the output blocks only
    __a : str = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
    __a : Tuple = {
        layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
        for layer_id in range(_SCREAMING_SNAKE_CASE )
    }

    for i in range(1 , _SCREAMING_SNAKE_CASE ):
        __a : Tuple = (i - 1) // (config['num_res_blocks'] + 1)
        __a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1)

        __a : Dict = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
        __a : Optional[Any] = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]

        if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
            __a : Tuple = checkpoint[
                F"""input_blocks.{i}.0.op.weight"""
            ]
            __a : int = checkpoint[
                F"""input_blocks.{i}.0.op.bias"""
            ]
            continue

        __a : str = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
        __a : str = {'old': F"""input_blocks.{i}.0""", 'new': F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        __a : List[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path, resnet_op] , config=_SCREAMING_SNAKE_CASE )

        if len(_SCREAMING_SNAKE_CASE ):
            __a : Any = renew_attention_paths(_SCREAMING_SNAKE_CASE )
            __a : Optional[int] = {
                'old': F"""input_blocks.{i}.1""",
                'new': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            __a : Tuple = {
                F"""input_blocks.{i}.1.qkv.bias""": {
                    'key': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    'query': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    'value': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                F"""input_blocks.{i}.1.qkv.weight""": {
                    'key': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    'query': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    'value': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE , )

    __a : Dict = middle_blocks[0]
    __a : List[Any] = middle_blocks[1]
    __a : Optional[Any] = middle_blocks[2]

    __a : Union[str, Any] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
    assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )

    __a : Any = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
    assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )

    __a : List[str] = renew_attention_paths(_SCREAMING_SNAKE_CASE )
    __a : List[str] = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , attention_paths_to_split=_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )

    for i in range(_SCREAMING_SNAKE_CASE ):
        __a : Dict = i // (config['num_res_blocks'] + 1)
        __a : Tuple = i % (config['num_res_blocks'] + 1)

        __a : int = [shave_segments(_SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]]
        __a : Dict = {}
        for layer in output_block_layers:
            __a , __a : Optional[int] = layer.split('.' )[0], shave_segments(_SCREAMING_SNAKE_CASE , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(_SCREAMING_SNAKE_CASE )
            else:
                __a : Union[str, Any] = [layer_name]

        if len(_SCREAMING_SNAKE_CASE ) > 1:
            __a : List[str] = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
            __a : List[str] = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]

            __a : Optional[int] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
            __a : Optional[Any] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )

            __a : Dict = {'old': F"""output_blocks.{i}.0""", 'new': F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                __a : Optional[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
                __a : List[Any] = checkpoint[
                    F"""output_blocks.{i}.{index}.conv.weight"""
                ]
                __a : str = checkpoint[
                    F"""output_blocks.{i}.{index}.conv.bias"""
                ]

                # Clear attentions as they have been attributed above.
                if len(_SCREAMING_SNAKE_CASE ) == 2:
                    __a : str = []

            if len(_SCREAMING_SNAKE_CASE ):
                __a : List[str] = renew_attention_paths(_SCREAMING_SNAKE_CASE )
                __a : List[str] = {
                    'old': F"""output_blocks.{i}.1""",
                    'new': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
                }
                __a : Optional[int] = {
                    F"""output_blocks.{i}.1.qkv.bias""": {
                        'key': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                        'query': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                        'value': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                    },
                    F"""output_blocks.{i}.1.qkv.weight""": {
                        'key': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                        'query': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                        'value': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                    },
                }
                assign_to_checkpoint(
                    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] ,
                    attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=_SCREAMING_SNAKE_CASE , )
        else:
            __a : Optional[int] = renew_resnet_paths(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                __a : List[str] = '.'.join(['output_blocks', str(_SCREAMING_SNAKE_CASE ), path['old']] )
                __a : Optional[int] = '.'.join(['up_blocks', str(_SCREAMING_SNAKE_CASE ), 'resnets', str(_SCREAMING_SNAKE_CASE ), path['new']] )

                __a : List[str] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    __lowercase : str = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    __lowercase : Dict = parser.parse_args()

    __lowercase : str = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        __lowercase : Tuple = json.loads(f.read())

    __lowercase : List[Any] = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    __lowercase : List[str] = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        __lowercase : List[str] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))

        __lowercase : List[Any] = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))

        __lowercase : Optional[int] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
27
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    A_ = StableDiffusionInpaintPipeline
    A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    A_ = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    A_ = frozenset([] )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : int = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
        __a : str = PNDMScheduler(skip_prk_steps=__a )
        torch.manual_seed(0 )
        __a : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        __a : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        __a : Dict = CLIPTextModel(__a )
        __a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        __a : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def __UpperCAmelCase ( self , __a , __a=0 ):
        '''simple docstring'''
        __a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        __a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : Tuple = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) )
        __a : Tuple = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(__a ).startswith('mps' ):
            __a : Any = torch.manual_seed(__a )
        else:
            __a : str = torch.Generator(device=__a ).manual_seed(__a )
        __a : Dict = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Dict = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __a : str = self.get_dummy_components()
        __a : Union[str, Any] = StableDiffusionInpaintPipeline(**__a )
        __a : List[Any] = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )

        __a : List[Any] = self.get_dummy_inputs(__a )
        __a : Dict = sd_pipe(**__a ).images
        __a : Optional[Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        __a : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        __a : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        __a : str = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )

        __a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting'
        __a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()

        __a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'

        __a : Tuple = torch.manual_seed(0 )
        __a : int = pipe(
            prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
        __a : Dict = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9E-3

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        __a : int = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        __a : Any = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )

        __a : str = 'stabilityai/stable-diffusion-2-inpainting'
        __a : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , safety_checker=__a , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()

        __a : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'

        __a : int = torch.manual_seed(0 )
        __a : Optional[Any] = pipe(
            prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
        __a : int = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        __a : str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        __a : List[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )

        __a : str = 'stabilityai/stable-diffusion-2-inpainting'
        __a : Any = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' )
        __a : str = StableDiffusionInpaintPipeline.from_pretrained(
            __a , safety_checker=__a , scheduler=__a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        __a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'

        __a : Tuple = torch.manual_seed(0 )
        __a : str = pipe(
            prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='np' , )

        __a : List[str] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
27
1
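# A sketch of driving the LDM conversion script above programmatically rather than via
# its argparse entry point. `convert_ldm.py` is a placeholder module name, the file
# paths are placeholders, and `convert_ldm_checkpoint` is the function its `__main__`
# block calls; everything else mirrors that block. (The mangled import spells
# diffusers' UNet2DModel as `UNetaDModel`.)
import json

import torch
from diffusers import UNet2DModel

from convert_ldm import convert_ldm_checkpoint  # placeholder module name

checkpoint = torch.load("model.ckpt")  # placeholder checkpoint path
with open("config.json") as f:  # placeholder config path
    config = json.loads(f.read())

converted = convert_ldm_checkpoint(checkpoint, config)
config.pop("ldm", None)

model = UNet2DModel(**config)
model.load_state_dict(converted)
model.save_pretrained("converted-unet")  # placeholder output directory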
'''simple docstring'''
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Dict = ort.SessionOptions()
        __a : Dict = False
        return options

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        __a : int = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        __a : Dict = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )

        # using the PNDM scheduler by default
        __a : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__a )

        __a : Tuple = 'A red cat sitting on a park bench'

        __a : int = np.random.RandomState(0 )
        __a : Tuple = pipe(
            prompt=__a , image=__a , mask_image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__a , output_type='np' , )

        __a : Tuple = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-2
27
'''simple docstring'''
import requests

__lowercase : Tuple = ''  # <-- Put your OpenWeatherMap appid here!
__lowercase : Tuple = 'https://api.openweathermap.org/data/2.5/'


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Chicago" , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'weather' , params=locals() ).json()


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Kolkata, India" , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'forecast' , params=locals() ).json()


def lowerCamelCase (_SCREAMING_SNAKE_CASE : float = 5_5.6_8 , _SCREAMING_SNAKE_CASE : float = 1_2.5_7 , _SCREAMING_SNAKE_CASE : str = APPID ):
    return requests.get(URL_BASE + 'onecall' , params=locals() ).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        __lowercase : Dict = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
27
1
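# A cleaned-up sketch of the OpenWeatherMap wrapper above. The mangled copy passes
# `locals()` as query parameters; here the parameters are spelled out with the names
# the /weather endpoint expects (`q` and `appid`). The appid value is a placeholder.
import requests

URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = "YOUR_APPID") -> dict:
    # `q` and `appid` become the query string of the /weather request.
    return requests.get(URL_BASE + "weather", params={"q": q, "appid": appid}).json()


if __name__ == "__main__":
    from pprint import pprint

    pprint(current_weather("Kolkata, India"))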
'''simple docstring'''
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__lowercase : Tuple = 'src/diffusers'

# Matches is_xxx_available()
__lowercase : Tuple = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
__lowercase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

__lowercase : Optional[Any] = '\n{0} = None\n'

__lowercase : Tuple = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'

__lowercase : Dict = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ):
    __a : Union[str, Any] = _re_backend.findall(_SCREAMING_SNAKE_CASE )
    if len(_SCREAMING_SNAKE_CASE ) == 0:
        return None
    return "_and_".join(_SCREAMING_SNAKE_CASE )


def lowerCamelCase ():
    with open(os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        __a : Any = f.readlines()

    # Get to the point we do the actual imports for type checking
    __a : Any = 0
    __a : Union[str, Any] = {}
    # Go through the end of the file
    while line_index < len(_SCREAMING_SNAKE_CASE ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        __a : Tuple = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            __a : Union[str, Any] = []
            # Until we unindent, add backend objects to the list
            while line_index < len(_SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
                __a : List[str] = lines[line_index]
                __a : Optional[Any] = _re_single_line_import.search(_SCREAMING_SNAKE_CASE )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(_SCREAMING_SNAKE_CASE ) > 0:
                __a : List[str] = objects
        else:
            line_index += 1

    return backend_specific_objects


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict ):
    if name.isupper():
        return DUMMY_CONSTANT.format(_SCREAMING_SNAKE_CASE )
    elif name.islower():
        return DUMMY_FUNCTION.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    else:
        return DUMMY_CLASS.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple=None ):
    if backend_specific_objects is None:
        __a : List[str] = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    __a : Optional[int] = {}

    for backend, objects in backend_specific_objects.items():
        __a : int = '[' + ', '.join(F"""\"{b}\"""" for b in backend.split('_and_' ) ) + ']'
        __a : str = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for o in objects] )
        __a : Optional[int] = dummy_file

    return dummy_files


def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str]=False ):
    __a : Any = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    __a : Any = {'torch': 'pt'}

    # Locate actual dummy modules and read their content.
    __a : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'utils' )
    __a : List[Any] = {
        backend: os.path.join(_SCREAMING_SNAKE_CASE , F"""dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py""" )
        for backend in dummy_files.keys()
    }

    __a : int = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(_SCREAMING_SNAKE_CASE ):
            with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
                __a : str = f.read()
        else:
            __a : Any = ''

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"""Updating diffusers.utils.dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py as the main """
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    F"""diffusers.utils.dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` """
                    'to fix this.' )


if __name__ == "__main__":
    __lowercase : Dict = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    __lowercase : Tuple = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
27
'''simple docstring'''
import torch

from transformers import AutoModel


class __UpperCamelCase ( torch.nn.Module ):
    def __init__( self , __a="sayef/fsner-bert-base-uncased" ):
        '''simple docstring'''
        super(__a , self ).__init__()

        __a : Tuple = AutoModel.from_pretrained(__a , return_dict=__a )
        __a : int = torch.nn.CosineSimilarity(3 , 1E-0_8 )
        __a : Union[str, Any] = torch.nn.Softmax(dim=1 )

    def __UpperCAmelCase ( self , **__a ):
        '''simple docstring'''
        return self.bert(**__a ).last_hidden_state

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        return token_embeddings.sum(2 , keepdim=__a )

    def __UpperCAmelCase ( self , __a , __a , __a=1 ):
        '''simple docstring'''
        return self.softmax(T * self.cos(__a , __a ) )

    def __UpperCAmelCase ( self , __a , __a ):
        '''simple docstring'''
        __a : str = W_supports['sizes'].tolist()
        __a : Union[str, Any] = W_supports['start_token_id'].item()
        __a : Any = W_supports['end_token_id'].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        __a : Tuple = self.BERT(**__a )
        __a : str = self.BERT(**__a )

        __a : Any = None
        __a : Dict = None

        __a : Dict = W_supports['input_ids'] == start_token_id
        __a : Union[str, Any] = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(__a ):
            if i == 0:
                __a : Optional[int] = 0
            else:
                __a : Union[str, Any] = support_sizes[i - 1]

            __a : int = S[s : s + size][start_token_masks[s : s + size]]
            __a : Union[str, Any] = S[s : s + size][end_token_masks[s : s + size]]

            __a : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            __a : Dict = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )

            if p_starts is not None:
                __a : str = torch.vstack((p_starts, p_start) )
                __a : str = torch.vstack((p_ends, p_end) )
            else:
                __a : List[str] = p_start
                __a : int = p_end

        return p_starts, p_ends
27
1
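# A quick, self-contained check of the backend-detection regex used by the
# check_dummies script above; the pattern is copied from that file, and find_backend
# reproduces the joining behaviour its call sites rely on.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")


def find_backend(line: str):
    backends = _re_backend.findall(line)
    return "_and_".join(backends) if backends else None


assert find_backend("if is_torch_available() and is_scipy_available():") == "torch_and_scipy"
assert find_backend("x = 1") is None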
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowercase : List[Any] = logging.get_logger(__name__)

__lowercase : str = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "mobilenet_v2"

    def __init__( self , __a=3 , __a=224 , __a=1.0 , __a=8 , __a=8 , __a=6 , __a=32 , __a=True , __a=True , __a="relu6" , __a=True , __a=0.8 , __a=0.02 , __a=0.001 , __a=255 , **__a , ):
        '''simple docstring'''
        super().__init__(**__a )

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )

        __a : Any = num_channels
        __a : Dict = image_size
        __a : Optional[Any] = depth_multiplier
        __a : List[str] = depth_divisible_by
        __a : List[str] = min_depth
        __a : Any = expand_ratio
        __a : Optional[Any] = output_stride
        __a : str = first_layer_is_expansion
        __a : Optional[Any] = finegrained_output
        __a : Optional[Any] = hidden_act
        __a : List[Any] = tf_padding
        __a : Optional[int] = classifier_dropout_prob
        __a : Union[str, Any] = initializer_range
        __a : int = layer_norm_eps
        __a : str = semantic_loss_ignore_index


class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = version.parse("1.11" )

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return OrderedDict([('pixel_values', {0: 'batch'})] )

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )

    @property
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        return 1E-4
27
'''simple docstring'''
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
    __a : int = int(number**0.5 )
    return number == sq * sq


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
    __a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    __a : int = x_den * y_den * z_den
    __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    top //= hcf
    bottom //= hcf
    return top, bottom


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 35 ):
    __a : set = set()
    __a : int
    __a : Fraction = Fraction(0 )
    __a : tuple[int, int]

    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    __a : Union[str, Any] = x_num * y_den + x_den * y_num
                    __a : Optional[Any] = x_den * y_den
                    __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        __a : Any = add_three(
                            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=2
                    __a : Optional[int] = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    __a : Union[str, Any] = x_den * x_den * y_den * y_den
                    if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
                        __a : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            __a : List[Any] = add_three(
                                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                            unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=-1
                    __a : int = x_num * y_num
                    __a : Optional[Any] = x_den * y_num + x_num * y_den
                    __a : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        __a : Any = add_three(
                            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        unique_s.add(_SCREAMING_SNAKE_CASE )

                    # n=2
                    __a : List[Any] = x_num * x_num * y_num * y_num
                    __a : List[Any] = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
                        __a : Optional[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : Union[str, Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            __a : List[str] = add_three(
                                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                            unique_s.add(_SCREAMING_SNAKE_CASE )

    for num, den in unique_s:
        total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f'''{solution() = }''')
27
1
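# A small sanity check of the fraction-combining step in the Project Euler solution
# above: `add_three` is the name its own call sites use, and the body below mirrors
# the mangled original (sum three fractions, then reduce by the gcd).
from fractions import Fraction
from math import gcd


def add_three(x_num, x_den, y_num, y_den, z_num, z_den):
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    return top // hcf, bottom // hcf


num, den = add_three(1, 2, 1, 3, 1, 6)  # 1/2 + 1/3 + 1/6 == 1
assert (num, den) == (1, 1)
assert Fraction(num, den) == Fraction(1, 2) + Fraction(1, 3) + Fraction(1, 6)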