Dataset schema (one row = one (code, code_codestyle, style_context, style_context_codestyle, label) example; the rows below follow this column order):

    code                     string  (length 87 to 55.2k)
    code_codestyle           int64   (0 to 349)
    style_context            string  (length 135 to 49.1k)
    style_context_codestyle  int64   (0 to 349)
    label                    int64   (0 to 1)
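The rows that follow are raw tuples in this column order, with the long code fields flattened onto single lines. As a minimal, hypothetical sketch of how a dump with this schema could be loaded for inspection (the file name "rows.jsonl", the JSONL storage format, and the pandas approach are all assumptions, not part of the dump):

    # Hypothetical loading sketch -- "rows.jsonl" and the JSONL format are
    # assumptions; the dump itself does not state how it is stored.
    import pandas as pd

    df = pd.read_json("rows.jsonl", lines=True)
    expected_columns = ["code", "code_codestyle", "style_context", "style_context_codestyle", "label"]
    assert list(df.columns) == expected_columns
    print(df[["code_codestyle", "style_context_codestyle", "label"]].describe())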
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger _A : Any =get_logger(__name__) class _lowercase : def __init__( self: Optional[Any] , UpperCamelCase__: Optional[str] = None ): lowerCamelCase__ : str = ( os.path.join(UpperCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) lowerCamelCase__ : int = Extractor def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: str ): from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" lowerCamelCase__ : int = os.path.abspath(UpperCamelCase__ ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase__ ) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: str , UpperCamelCase__: bool ): return force_extract or ( not os.path.isfile(UpperCamelCase__ ) and not (os.path.isdir(UpperCamelCase__ ) and os.listdir(UpperCamelCase__ )) ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: bool = False ): lowerCamelCase__ : Any = self.extractor.infer_extractor_format(UpperCamelCase__ ) if not extractor_format: return input_path lowerCamelCase__ : str = self._get_output_path(UpperCamelCase__ ) if self._do_extract(UpperCamelCase__ , UpperCamelCase__ ): self.extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return output_path class _lowercase ( _lowercase ): @classmethod @abstractmethod def lowerCamelCase_ ( cls: Tuple , UpperCamelCase__: Union[Path, str] , **UpperCamelCase__: int ): ... @staticmethod @abstractmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): ... 
class _lowercase ( _lowercase , _lowercase ): a = [] @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: int ): with open(UpperCamelCase__ , """rb""" ) as f: return f.read(UpperCamelCase__ ) @classmethod def lowerCamelCase_ ( cls: str , UpperCamelCase__: Union[Path, str] , UpperCamelCase__: bytes = b"" ): if not magic_number: lowerCamelCase__ : str = max(len(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers ) try: lowerCamelCase__ : Optional[int] = cls.read_magic_number(UpperCamelCase__ , UpperCamelCase__ ) except OSError: return False return any(magic_number.startswith(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers ) class _lowercase ( _lowercase ): @classmethod def lowerCamelCase_ ( cls: int , UpperCamelCase__: Union[Path, str] , **UpperCamelCase__: Tuple ): return tarfile.is_tarfile(UpperCamelCase__ ) @staticmethod def lowerCamelCase_ ( UpperCamelCase__: str , UpperCamelCase__: int ): def resolved(UpperCamelCase__: str ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase__ ) ) def badpath(UpperCamelCase__: str , UpperCamelCase__: str ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ).startswith(UpperCamelCase__ ) def badlink(UpperCamelCase__: int , UpperCamelCase__: str ) -> bool: # Links are interpreted relative to the directory containing the link lowerCamelCase__ : Any = resolved(os.path.join(UpperCamelCase__ , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = resolved(UpperCamelCase__ ) for finfo in members: if badpath(finfo.name , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase__ , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase__ , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = tarfile.open(UpperCamelCase__ ) tar_file.extractall(UpperCamelCase__ , members=TarExtractor.safemembers(UpperCamelCase__ , UpperCamelCase__ ) ) tar_file.close() class _lowercase ( _lowercase ): a = [B"""\x1F\x8B"""] @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): with gzip.open(UpperCamelCase__ , """rb""" ) as gzip_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class _lowercase ( _lowercase ): a = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def lowerCamelCase_ ( cls: Any , UpperCamelCase__: Union[Path, str] , UpperCamelCase__: bytes = b"" ): if super().is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase__ , """rb""" ) as fp: lowerCamelCase__ : Optional[Any] = _EndRecData(UpperCamelCase__ ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: lowerCamelCase__ : Any = fp.read(UpperCamelCase__ ) # CD is where we expect it to be if len(UpperCamelCase__ ) == sizeCentralDir: lowerCamelCase__ : Union[str, Any] = struct.unpack(UpperCamelCase__ , UpperCamelCase__ ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with zipfile.ZipFile(UpperCamelCase__ , """r""" ) as zip_file: zip_file.extractall(UpperCamelCase__ ) zip_file.close() class _lowercase ( _lowercase ): a = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): with lzma.open(UpperCamelCase__ ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class _lowercase ( _lowercase ): a = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) lowerCamelCase__ : str = rarfile.RarFile(UpperCamelCase__ ) rf.extractall(UpperCamelCase__ ) rf.close() class _lowercase ( _lowercase ): a = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd lowerCamelCase__ : Dict = zstd.ZstdDecompressor() with open(UpperCamelCase__ , """rb""" ) as ifh, open(UpperCamelCase__ , """wb""" ) as ofh: dctx.copy_stream(UpperCamelCase__ , UpperCamelCase__ ) class _lowercase ( _lowercase ): a = [B"""\x42\x5A\x68"""] @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): with bza.open(UpperCamelCase__ , """rb""" ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class _lowercase ( _lowercase ): a = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with pyazr.SevenZipFile(UpperCamelCase__ , """r""" ) as archive: 
archive.extractall(UpperCamelCase__ ) class _lowercase ( _lowercase ): a = [B"""\x04\x22\x4D\x18"""] @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] ): if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(UpperCamelCase__ , """rb""" ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class _lowercase : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) a = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def lowerCamelCase_ ( cls: Tuple ): return max( len(UpperCamelCase__ ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase__ , UpperCamelCase__ ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def lowerCamelCase_ ( UpperCamelCase__: Union[Path, str] , UpperCamelCase__: int ): try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase__ , magic_number_length=UpperCamelCase__ ) except OSError: return b"" @classmethod def lowerCamelCase_ ( cls: Optional[Any] , UpperCamelCase__: Union[Path, str] , UpperCamelCase__: bool = False ): warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=UpperCamelCase__ , ) lowerCamelCase__ : List[Any] = cls.infer_extractor_format(UpperCamelCase__ ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def lowerCamelCase_ ( cls: List[str] , UpperCamelCase__: Union[Path, str] ): # <Added version="2.4.0"/> lowerCamelCase__ : int = cls._get_magic_number_max_length() lowerCamelCase__ : str = cls._read_magic_number(UpperCamelCase__ , UpperCamelCase__ ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ): return extractor_format @classmethod def lowerCamelCase_ ( cls: List[str] , UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Union[Path, str] , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[BaseExtractor] = "deprecated" , ): os.makedirs(os.path.dirname(UpperCamelCase__ ) , exist_ok=UpperCamelCase__ ) # Prevent parallel extractions lowerCamelCase__ : Optional[Any] = str(Path(UpperCamelCase__ ).with_suffix(""".lock""" ) ) with FileLock(UpperCamelCase__ ): shutil.rmtree(UpperCamelCase__ , ignore_errors=UpperCamelCase__ ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. 
""" """Use 'extractor_format' instead.""" , category=UpperCamelCase__ , ) lowerCamelCase__ : int = extractor if extractor != """deprecated""" else extractor_format else: lowerCamelCase__ : Dict = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase__ , UpperCamelCase__ ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=UpperCamelCase__ , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase__ ): return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
code_codestyle: 41
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
style_context_codestyle: 331
label: 0
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
code_codestyle: 42
"""Count the ways to make `x` pence from standard UK coins (Project Euler style)."""


def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
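The top-down recursion above recomputes the same subproblems many times. As an illustrative sketch only (the name solution_dp and the iterative formulation are mine, not part of the original file), the same count can be computed bottom-up with the standard unbounded coin-change DP:

    # Iterative DP equivalent of the recursive count above (illustrative).
    def solution_dp(target: int = 200) -> int:
        coins = [1, 2, 5, 10, 20, 50, 100, 200]
        ways = [1] + [0] * target  # one way to make 0 pence: use no coins
        for coin in coins:
            for amount in range(coin, target + 1):
                ways[amount] += ways[amount - coin]
        return ways[target]

Both formulations agree; for target = 200 the count is 73682, the classic Project Euler 31 answer.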
style_context_codestyle: 331
label: 0
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : List[str] = """char""" a__ : str = """bpe""" a__ : Union[str, Any] = """wp""" __lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : List[Any] = ["""image_processor""", """char_tokenizer"""] a__ : Optional[Any] = """ViTImageProcessor""" a__ : Optional[Any] = """MgpstrTokenizer""" def __init__( self , __lowercase=None , __lowercase=None , **__lowercase) -> Any: __UpperCamelCase :Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowercase , ) __UpperCamelCase :Any = kwargs.pop('''feature_extractor''') __UpperCamelCase :List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') __UpperCamelCase :Optional[Any] = tokenizer __UpperCamelCase :Any = AutoTokenizer.from_pretrained('''gpt2''') __UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''') super().__init__(__lowercase , __lowercase) def __call__( self , __lowercase=None , __lowercase=None , __lowercase=None , **__lowercase) -> Tuple: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''') if images is not None: __UpperCamelCase :Union[str, Any] = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase) if text is not None: __UpperCamelCase :Dict = self.char_tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase) if text is None: return inputs elif images is None: return encodings else: __UpperCamelCase :int = encodings['''input_ids'''] return inputs def UpperCamelCase__ ( self , __lowercase) -> Any: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = sequences __UpperCamelCase :Dict = char_preds.size(0) __UpperCamelCase , __UpperCamelCase :Optional[Any] = self._decode_helper(__lowercase , '''char''') __UpperCamelCase , __UpperCamelCase :Dict = self._decode_helper(__lowercase , '''bpe''') __UpperCamelCase , __UpperCamelCase :Optional[int] = self._decode_helper(__lowercase , '''wp''') __UpperCamelCase :Optional[int] = [] __UpperCamelCase :str = [] for i in range(__lowercase): __UpperCamelCase :Optional[int] = [char_scores[i], bpe_scores[i], wp_scores[i]] __UpperCamelCase :Any = [char_strs[i], bpe_strs[i], wp_strs[i]] __UpperCamelCase :Optional[Any] = scores.index(max(__lowercase)) final_strs.append(strs[max_score_index]) final_scores.append(scores[max_score_index]) __UpperCamelCase :int = {} __UpperCamelCase :Dict = final_strs __UpperCamelCase :Tuple = final_scores __UpperCamelCase :Dict = char_strs __UpperCamelCase :List[str] = bpe_strs __UpperCamelCase :Union[str, Any] = wp_strs return out def UpperCamelCase__ ( self , __lowercase , __lowercase) -> str: if format == DecodeType.CHARACTER: __UpperCamelCase :Tuple = self.char_decode __UpperCamelCase :Optional[Any] = 1 __UpperCamelCase :Any = '''[s]''' elif format == DecodeType.BPE: __UpperCamelCase :int = self.bpe_decode 
__UpperCamelCase :Dict = 2 __UpperCamelCase :int = '''#''' elif format == DecodeType.WORDPIECE: __UpperCamelCase :Optional[int] = self.wp_decode __UpperCamelCase :List[str] = 102 __UpperCamelCase :Tuple = '''[SEP]''' else: raise ValueError(f"""Format {format} is not supported.""") __UpperCamelCase , __UpperCamelCase :List[str] = [], [] __UpperCamelCase :List[Any] = pred_logits.size(0) __UpperCamelCase :List[str] = pred_logits.size(1) __UpperCamelCase , __UpperCamelCase :List[str] = pred_logits.topk(1 , dim=-1 , largest=__lowercase , sorted=__lowercase) __UpperCamelCase :List[Any] = preds_index.view(-1 , __lowercase)[:, 1:] __UpperCamelCase :Optional[Any] = decoder(__lowercase) __UpperCamelCase , __UpperCamelCase :str = torch.nn.functional.softmax(__lowercase , dim=2).max(dim=2) __UpperCamelCase :str = preds_max_prob[:, 1:] for index in range(__lowercase): __UpperCamelCase :int = preds_str[index].find(__lowercase) __UpperCamelCase :Union[str, Any] = preds_str[index][:pred_eos] __UpperCamelCase :int = preds_index[index].cpu().tolist() __UpperCamelCase :str = pred_index.index(__lowercase) if eos_token in pred_index else -1 __UpperCamelCase :Optional[int] = preds_max_prob[index][: pred_eos_index + 1] __UpperCamelCase :str = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowercase) conf_scores.append(__lowercase) return dec_strs, conf_scores def UpperCamelCase__ ( self , __lowercase) -> Any: __UpperCamelCase :Optional[int] = [seq.replace(''' ''' , '''''') for seq in self.char_tokenizer.batch_decode(__lowercase)] return decode_strs def UpperCamelCase__ ( self , __lowercase) -> Optional[int]: return self.bpe_tokenizer.batch_decode(__lowercase) def UpperCamelCase__ ( self , __lowercase) -> Tuple: __UpperCamelCase :Optional[int] = [seq.replace(''' ''' , '''''') for seq in self.wp_tokenizer.batch_decode(__lowercase)] return decode_strs
code_codestyle: 43
# Autogenerated-style dummy objects for the ["flax", "transformers"] backend.
# NOTE: the original class names were anonymized in this dump; the
# FlaxDummyObject* names below are placeholders, not real diffusers classes.
from ..utils import DummyObject, requires_backends


class FlaxDummyObject0(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject1(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject2(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject3(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
style_context_codestyle: 331
label: 0
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class __A ( unittest.TestCase ): def __init__( self , a__ , a__=7 , a__=3 , a__=30 , a__=400 , a__=True , a__=None , a__=True , a__=[0.5, 0.5, 0.5] , a__=[0.5, 0.5, 0.5] , a__=True , a__=1 / 255 , a__=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _lowerCAmelCase : int = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} _lowerCAmelCase : List[str] = parent _lowerCAmelCase : Dict = batch_size _lowerCAmelCase : int = num_channels _lowerCAmelCase : Optional[Any] = min_resolution _lowerCAmelCase : Tuple = max_resolution _lowerCAmelCase : List[Any] = do_resize _lowerCAmelCase : Optional[Any] = size _lowerCAmelCase : Optional[int] = do_normalize _lowerCAmelCase : Optional[int] = image_mean _lowerCAmelCase : Dict = image_std _lowerCAmelCase : Optional[Any] = do_rescale _lowerCAmelCase : List[Any] = rescale_factor _lowerCAmelCase : int = do_pad def __A ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __A ( self , a__ , a__=False ): if not batched: _lowerCAmelCase : Optional[int] = image_inputs[0] if isinstance(a__ , Image.Image ): _lowerCAmelCase , _lowerCAmelCase : str = image.size else: _lowerCAmelCase , _lowerCAmelCase : Tuple = image.shape[1], image.shape[2] if w < h: _lowerCAmelCase : Any = int(self.size["""shortest_edge"""] * h / w ) _lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""] elif w > h: _lowerCAmelCase : Tuple = self.size["""shortest_edge"""] _lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * w / h ) else: _lowerCAmelCase : List[Any] = self.size["""shortest_edge"""] _lowerCAmelCase : Any = self.size["""shortest_edge"""] else: _lowerCAmelCase : Optional[int] = [] for image in image_inputs: _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _lowerCAmelCase : List[str] = max(a__ , key=lambda a__ : item[0] )[0] _lowerCAmelCase : Any = max(a__ , key=lambda a__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : Tuple = ConditionalDetrImageProcessor if is_vision_available() else None def __A ( self ): _lowerCAmelCase : Dict = ConditionalDetrImageProcessingTester(self ) @property def __A ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __A ( self ): _lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a__ , """image_mean""" ) ) self.assertTrue(hasattr(a__ , """image_std""" ) ) self.assertTrue(hasattr(a__ , """do_normalize""" ) ) self.assertTrue(hasattr(a__ , """do_resize""" ) ) self.assertTrue(hasattr(a__ , """size""" ) ) def __A ( self ): _lowerCAmelCase : List[str] = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , a__ ) _lowerCAmelCase : Tuple = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a__ ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , a__ ) def __A ( self ): pass def __A ( self ): # Initialize image_processing _lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ ) for image in image_inputs: self.assertIsInstance(a__ , Image.Image ) # Test not batched input _lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _lowerCAmelCase , _lowerCAmelCase : str = self.image_processor_tester.get_expected_values(a__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(a__ , batched=a__ ) _lowerCAmelCase : Optional[Any] = image_processing(a__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __A ( self ): # Initialize image_processing _lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ ) for image in image_inputs: self.assertIsInstance(a__ , np.ndarray ) # Test not batched input _lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(a__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase : str = image_processing(a__ , return_tensors="""pt""" ).pixel_values _lowerCAmelCase , _lowerCAmelCase : int = self.image_processor_tester.get_expected_values(a__ , batched=a__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __A ( self ): # Initialize image_processing _lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ ) for image in image_inputs: self.assertIsInstance(a__ , torch.Tensor ) # Test not batched input _lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(a__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase : Tuple = image_processing(a__ , return_tensors="""pt""" ).pixel_values _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 
self.image_processor_tester.get_expected_values(a__ , batched=a__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __A ( self ): # prepare image and target _lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: _lowerCAmelCase : Optional[Any] = json.loads(f.read() ) _lowerCAmelCase : Optional[int] = {"""image_id""": 39769, """annotations""": target} # encode them _lowerCAmelCase : Union[str, Any] = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" ) _lowerCAmelCase : Any = image_processing(images=a__ , annotations=a__ , return_tensors="""pt""" ) # verify pixel values _lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , a__ ) _lowerCAmelCase : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , a__ , atol=1e-4 ) ) # verify area _lowerCAmelCase : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , a__ ) ) # verify boxes _lowerCAmelCase : str = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , a__ ) _lowerCAmelCase : Union[str, Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , a__ , atol=1e-3 ) ) # verify image_id _lowerCAmelCase : Any = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , a__ ) ) # verify is_crowd _lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , a__ ) ) # verify class_labels _lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , a__ ) ) # verify orig_size _lowerCAmelCase : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , a__ ) ) # verify size _lowerCAmelCase : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , a__ ) ) @slow def __A ( self ): # prepare image, target and masks_path _lowerCAmelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: _lowerCAmelCase : str = json.loads(f.read() ) _lowerCAmelCase : Dict = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} _lowerCAmelCase : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them _lowerCAmelCase : List[str] = ConditionalDetrImageProcessor(format="""coco_panoptic""" ) _lowerCAmelCase : str = image_processing(images=a__ , annotations=a__ , masks_path=a__ , return_tensors="""pt""" ) # verify pixel values _lowerCAmelCase : List[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , a__ ) _lowerCAmelCase : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) 
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , a__ , atol=1e-4 ) ) # verify area _lowerCAmelCase : List[str] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , a__ ) ) # verify boxes _lowerCAmelCase : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , a__ ) _lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , a__ , atol=1e-3 ) ) # verify image_id _lowerCAmelCase : List[Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , a__ ) ) # verify is_crowd _lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , a__ ) ) # verify class_labels _lowerCAmelCase : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , a__ ) ) # verify masks _lowerCAmelCase : int = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , a__ ) # verify orig_size _lowerCAmelCase : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , a__ ) ) # verify size _lowerCAmelCase : Union[str, Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , a__ ) )
code_codestyle: 44
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
style_context_codestyle: 331
label: 0
"""simple docstring""" import numpy as np def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float ) -> np.ndarray: return np.where(vector > 0 , lowerCAmelCase__ , (alpha * (np.exp(lowerCAmelCase__ ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 45
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler'] __magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]: rank_zero_info('***** Validation results *****' ) __magic_name__ : str = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]: rank_zero_info('***** Test results *****' ) __magic_name__ : Optional[int] = trainer.callback_metrics # Log and save results to file __magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __magic_name__ : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: __magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase ) if logging_callback is None: __magic_name__ : Dict = LoggingCallback() __magic_name__ : List[str] = {} if args.fpaa: __magic_name__ : Dict = 16 if args.gpus > 1: __magic_name__ : Tuple = 'auto' __magic_name__ : int = 'ddp' __magic_name__ : str = args.accumulate_grad_batches __magic_name__ : str = None __magic_name__ : List[str] = 'auto' __magic_name__ : List[Any] = pl.Trainer.from_argparse_args( lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , ) if args.do_train: trainer.fit(lowerCAmelCase ) else: print('RAG modeling tests with new set functions successfully executed!' ) return trainer
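A hedged sketch (not part of the record above) of how these argparse helpers and the train entry point are typically wired together in the upstream lightning examples. The names below (add_generic_args, generic_train, GLUETransformer) are assumed stand-ins for the identifier-mangled definitions above, so this is a wiring illustration rather than code that runs on its own:

import argparse
import os

parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())                    # output_dir, fp16, seed, data_dir, ...
parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())  # the model args above
args = parser.parse_args()

model = GLUETransformer(args)         # a BaseTransformer / pl.LightningModule subclass
trainer = generic_train(model, args)  # seeds everything, builds callbacks, runs pl.Trainer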
331
0
"""simple docstring""" import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' lowerCAmelCase = int(SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = t // 36_00, (t // 60) % 60, t % 60 return F'{h}:{m:02d}:{s:02d}' if h != 0 else F'{m:02d}:{s:02d}' def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=3_00 ): '''simple docstring''' return F'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n ' def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' lowerCAmelCase = """<table border=\"1\" class=\"dataframe\">\n""" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F' <th>{i}</th>\n' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: lowerCAmelCase = F'{elt:.6f}' if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else str(SCREAMING_SNAKE_CASE ) html_code += F' <td>{elt}</td>\n' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class lowercase : _SCREAMING_SNAKE_CASE = 5 _SCREAMING_SNAKE_CASE = 0.2 def __init__( self , lowercase , lowercase = None , lowercase = True , lowercase = None , lowercase = 300 , ) -> List[Any]: lowerCAmelCase = total lowerCAmelCase = """""" if prefix is None else prefix lowerCAmelCase = leave lowerCAmelCase = parent lowerCAmelCase = width lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None def _snake_case ( self , lowercase , lowercase = False , lowercase = None ) -> Optional[int]: lowerCAmelCase = value if comment is not None: lowerCAmelCase = comment if self.last_value is None: lowerCAmelCase = lowerCAmelCase = time.time() lowerCAmelCase = lowerCAmelCase = value lowerCAmelCase = lowerCAmelCase = None lowerCAmelCase = self.warmup lowerCAmelCase = 1 self.update_bar(lowercase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 lowerCAmelCase = time.time() lowerCAmelCase = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: lowerCAmelCase = self.elapsed_time / (value - self.start_value) else: lowerCAmelCase = None if value >= self.total: lowerCAmelCase = self.total lowerCAmelCase = None if not self.leave: self.close() elif self.average_time_per_item is not None: lowerCAmelCase = self.average_time_per_item * (self.total - value) self.update_bar(lowercase ) lowerCAmelCase = value lowerCAmelCase = current_time if self.average_time_per_item is None: lowerCAmelCase = 1 else: lowerCAmelCase = max(int(self.update_every / self.average_time_per_item ) , 1 ) def _snake_case ( self , lowercase , lowercase=None ) -> Dict: lowerCAmelCase = """ """ * (len(str(self.total ) ) - len(str(lowercase ) )) + str(lowercase ) if self.elapsed_time is None: lowerCAmelCase = f'[{spaced_value}/{self.total} : < :' elif self.predicted_remaining is None: lowerCAmelCase = f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}' else: lowerCAmelCase = ( f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <' f' {format_time(self.predicted_remaining )}' ) self.label += f', {1/self.average_time_per_item:.2f} it/s' self.label += "]" if self.comment is None or len(self.comment ) == 0 else f', {self.comment}]' self.display() def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: lowerCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowercase ) else: self.output.update(disp.HTML(self.html_code ) ) def _snake_case ( self ) -> Dict: if self.parent is None and self.output is not None: self.output.update(disp.HTML("""""" ) ) class lowercase ( _UpperCAmelCase ): def __init__( self , lowercase , lowercase=None ) -> List[str]: super().__init__(lowercase ) lowerCAmelCase = None if column_names is None else [column_names] lowerCAmelCase = None def _snake_case ( self ) -> Any: lowerCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: lowerCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowercase ) else: self.output.update(disp.HTML(self.html_code ) ) def _snake_case ( self , lowercase ) -> int: if self.inner_table is None: lowerCAmelCase = [list(values.keys() ), list(values.values() )] else: lowerCAmelCase = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(lowercase ) lowerCAmelCase = columns self.inner_table.append([values[c] for c in columns] ) def _snake_case ( self , lowercase , lowercase=None , lowercase=300 ) -> List[Any]: lowerCAmelCase = NotebookProgressBar(lowercase , prefix=lowercase , parent=self , width=lowercase ) return self.child_bar def _snake_case ( self ) -> int: lowerCAmelCase = None self.display() class lowercase ( _UpperCAmelCase ): def __init__( self ) -> Union[str, Any]: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = False def _snake_case ( self , lowercase , lowercase , lowercase , **lowercase ) -> Optional[Any]: lowerCAmelCase = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step""" lowerCAmelCase = 0 lowerCAmelCase = 
0 lowerCAmelCase = [self.first_column] + ["""Training Loss"""] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("""Validation Loss""" ) lowerCAmelCase = NotebookTrainingTracker(state.max_steps , lowercase ) def _snake_case ( self , lowercase , lowercase , lowercase , **lowercase ) -> Dict: lowerCAmelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f'{state.epoch:.2f}' self.training_tracker.update( state.global_step + 1 , comment=f'Epoch {epoch}/{state.num_train_epochs}' , force_update=self._force_next_update , ) lowerCAmelCase = False def _snake_case ( self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> Dict: if not has_length(lowercase ): return if self.prediction_bar is None: if self.training_tracker is not None: lowerCAmelCase = self.training_tracker.add_child(len(lowercase ) ) else: lowerCAmelCase = NotebookProgressBar(len(lowercase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def _snake_case ( self , lowercase , lowercase , lowercase , **lowercase ) -> Dict: if self.prediction_bar is not None: self.prediction_bar.close() lowerCAmelCase = None def _snake_case ( self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> List[Any]: # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: lowerCAmelCase = {"""Training Loss""": logs["""loss"""]} # First column is necessarily Step since we're not in epoch eval strategy lowerCAmelCase = state.global_step self.training_tracker.write_line(lowercase ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> List[str]: if self.training_tracker is not None: lowerCAmelCase = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""} for log in reversed(state.log_history ): if "loss" in log: lowerCAmelCase = log["""loss"""] break if self.first_column == "Epoch": lowerCAmelCase = int(state.epoch ) else: lowerCAmelCase = state.global_step lowerCAmelCase = """eval""" for k in metrics: if k.endswith("""_loss""" ): lowerCAmelCase = re.sub(r"""\_loss$""" , """""" , lowercase ) lowerCAmelCase = metrics.pop("""total_flos""" , lowercase ) lowerCAmelCase = metrics.pop("""epoch""" , lowercase ) lowerCAmelCase = metrics.pop(f'{metric_key_prefix}_runtime' , lowercase ) lowerCAmelCase = metrics.pop(f'{metric_key_prefix}_samples_per_second' , lowercase ) lowerCAmelCase = metrics.pop(f'{metric_key_prefix}_steps_per_second' , lowercase ) lowerCAmelCase = metrics.pop(f'{metric_key_prefix}_jit_compilation_time' , lowercase ) for k, v in metrics.items(): if k == f'{metric_key_prefix}_loss': lowerCAmelCase = v else: lowerCAmelCase = k.split("""_""" ) lowerCAmelCase = """ """.join([part.capitalize() for part in splits[1:]] ) lowerCAmelCase = v self.training_tracker.write_line(lowercase ) self.training_tracker.remove_child() lowerCAmelCase = None # Evaluation takes a long time so we should force the next update. lowerCAmelCase = True def _snake_case ( self , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]: self.training_tracker.update( state.global_step , comment=f'Epoch {int(state.epoch )}/{state.num_train_epochs}' , force_update=lowercase ) lowerCAmelCase = None
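The classes above are the identifier-mangled notebook progress utilities (NotebookProgressBar, NotebookTrainingTracker, NotebookProgressCallback) from transformers.utils.notebook. A minimal usage sketch, assuming a Jupyter environment where IPython display is available:

import time
from transformers.utils.notebook import NotebookProgressBar

pbar = NotebookProgressBar(100)               # renders an HTML bar in the cell output
for step in range(1, 101):
    time.sleep(0.05)                          # stand-in for real work
    pbar.update(step, comment=f"step {step}")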
46
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
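For context, the scheduler these tests exercise is driven by a denoising loop like the following. A minimal sketch using the public diffusers API, with a zero tensor standing in for a real UNet's noise prediction:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)                   # run 50 inference steps

sample = torch.randn(1, 3, 32, 32)            # start from pure noise
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)     # stand-in for a real model(sample, t) call
    sample = scheduler.step(noise_pred, t, sample).prev_sample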
331
0
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowerCamelCase : Any = logging.get_logger(__name__) lowerCamelCase : Dict = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) lowerCamelCase : Union[str, Any] = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) lowerCamelCase : Dict = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) lowerCamelCase : Any = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) lowerCamelCase : Union[str, Any] = OrderedDict( [ # Model for Image-classification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] )
lowerCamelCase : Any = OrderedDict( [ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) lowerCamelCase : List[Any] = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) lowerCamelCase : Optional[int] = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) lowerCamelCase : Optional[Any] = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) lowerCamelCase : List[str] = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) lowerCamelCase : Optional[int] = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) lowerCamelCase : str = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) lowerCamelCase : Dict = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) lowerCamelCase : Optional[int] = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) lowerCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_MAPPING_NAMES) lowerCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowerCamelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowerCamelCase : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowerCamelCase : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowerCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowerCamelCase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowerCamelCase : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowerCamelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowerCamelCase : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowerCamelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_MAPPING lowerCamelCase : int = auto_class_update(FlaxAutoModel) class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowerCamelCase : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowerCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCamelCase : Any = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCamelCase : Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase : str = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowerCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowerCamelCase : List[str] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING 
lowerCamelCase : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class A__ ( _BaseAutoModelClass ): A__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowerCamelCase : Optional[Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
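A short usage sketch for the auto classes these mappings back. Loading a base checkpoint into a task head (the model id here is chosen for illustration) downloads weights on first use, requires flax to be installed, and warns that the classification head is freshly initialized:

from transformers import AutoTokenizer, FlaxAutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

inputs = tokenizer("The mapping resolves this to FlaxBertForSequenceClassification.", return_tensors="np")
logits = model(**inputs).logits   # (batch, num_labels)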
47
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
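A hedged sketch of running the pipeline under test outside the test harness. The checkpoint is gated (it requires accepting the DeepFloyd license and logging in with huggingface-cli), and the image/mask paths are placeholders rather than values from the source:

import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import load_image

pipe = IFInpaintingPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()      # keeps peak GPU memory manageable

image = load_image("person.png")     # placeholder input image
mask = load_image("mask.png")        # placeholder binary mask
result = pipe(prompt="blue sunglasses", image=image, mask_image=mask).images[0]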
331
0
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( 'stable diffusion controlnet', '0.22.0', 'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.', standard_warn=False, stacklevel=3, )
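For reference, the replacement import path the shim's deprecation message points to:

# recommended import going forward (requires flax to be installed)
from diffusers import FlaxStableDiffusionControlNetPipeline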
48
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
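A minimal forward pass mirroring the integration test above, using the same public checkpoint:

import torch
from transformers import AutoTokenizer, DebertaModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state   # (batch, seq_len, hidden_size)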
331
0
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class _A : UpperCamelCase__ : Dict = BlenderbotSmallConfig UpperCamelCase__ : Tuple = {} UpperCamelCase__ : str = '''gelu''' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Dict=99 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=20 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : List[Any]=0 , ): '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = eos_token_id __a = pad_token_id __a = bos_token_id def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) __a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) __a = tf.concat([input_ids, eos_tensor] , axis=1) __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __a = prepare_blenderbot_small_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return config, inputs_dict def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = TFBlenderbotSmallModel(config=__SCREAMING_SNAKE_CASE).get_decoder() __a = inputs_dict['''input_ids'''] __a = input_ids[:1, :] __a = inputs_dict['''attention_mask'''][:1, :] __a = inputs_dict['''head_mask'''] __a = 1 # first forward pass __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE) __a , __a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size) __a = 
tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and __a = tf.concat([input_ids, next_tokens] , axis=-1) __a = tf.concat([attention_mask, next_attn_mask] , axis=-1) __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0] __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice __a = int(ids_tensor((1,) , output_from_past.shape[-1])) __a = output_from_no_past[:, -3:, random_slice_idx] __a = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1E-3) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ): if attention_mask is None: __a = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __a = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __a = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) UpperCamelCase__ : Union[str, Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () UpperCamelCase__ : str = ( { '''conversational''': TFBlenderbotSmallForConditionalGeneration, '''feature-extraction''': TFBlenderbotSmallModel, '''summarization''': TFBlenderbotSmallForConditionalGeneration, '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration, '''translation''': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase__ : Optional[int] = True UpperCamelCase__ : int = False UpperCamelCase__ : str = False def _lowerCamelCase ( self : int): '''simple docstring''' __a = TFBlenderbotSmallModelTester(self) __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE) @require_tokenizers @require_tf class _A ( unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = [ '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like ''' ''' i\'m going to throw up.\nand why is that?''' ] UpperCamelCase__ : str = '''facebook/blenderbot_small-90M''' @cached_property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''') @cached_property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model @slow def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.tokenizer(self.src_text , return_tensors='''tf''') __a = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__SCREAMING_SNAKE_CASE , ) __a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__SCREAMING_SNAKE_CASE)[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
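The same generation path as the slow test above, condensed into a standalone sketch (downloads the 90M checkpoint on first run):

from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration

name = "facebook/blenderbot_small-90M"
tokenizer = BlenderbotSmallTokenizer.from_pretrained(name)
model = TFBlenderbotSmallForConditionalGeneration.from_pretrained(name)

inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
reply_ids = model.generate(**inputs, num_beams=2)
print(tokenizer.batch_decode(reply_ids.numpy(), skip_special_tokens=True)[0])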
49
'''simple docstring''' class _lowerCamelCase : # Public class to implement a graph '''simple docstring''' def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: __magic_name__ : Tuple = row __magic_name__ : str = col __magic_name__ : Optional[Any] = graph def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None: # Checking all 8 elements surrounding nth element __magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1] __magic_name__ : Optional[int] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A ) def __lowerCAmelCase ( self : int ) -> int: # And finally, count all islands. __magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )] __magic_name__ : Any = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(_A , _A , _A ) count += 1 return count
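A de-mangled usage sketch of the flood-fill island counter above. `Graph` and `count_islands` are assumed names for the renamed class and method; the grid is the classic 5x5 example, where 8-connectivity yields 5 islands:

graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(row=5, col=5, graph=graph)   # assumed constructor signature (row, col, graph)
print(g.count_islands())               # -> 5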
331
0
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _UpperCAmelCase : Dict = logging.get_logger(__name__) _UpperCAmelCase : Any = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = """bart""" UpperCAmelCase__ = ["""past_key_values"""] UpperCAmelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Tuple , UpperCAmelCase : Optional[int]=50265 , UpperCAmelCase : Optional[int]=1024 , UpperCAmelCase : Optional[int]=12 , UpperCAmelCase : Dict=4096 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : List[Any]=4096 , UpperCAmelCase : Optional[Any]=16 , UpperCAmelCase : int=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : Tuple=1024 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Tuple=0.0_2 , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : Any=False , UpperCAmelCase : List[str]=True , UpperCAmelCase : str=3 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=2 , UpperCAmelCase : List[str]=2 , **UpperCAmelCase : Dict , ) -> Union[str, Any]: lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Optional[Any] = d_model lowerCamelCase__ : int = encoder_ffn_dim lowerCamelCase__ : int = encoder_layers lowerCamelCase__ : Optional[Any] = encoder_attention_heads lowerCamelCase__ : Dict = decoder_ffn_dim lowerCamelCase__ : Union[str, Any] = decoder_layers lowerCamelCase__ : List[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Optional[Any] = activation_dropout lowerCamelCase__ : str = activation_function lowerCamelCase__ : List[str] = init_std lowerCamelCase__ : Optional[Any] = encoder_layerdrop lowerCamelCase__ : Optional[int] = decoder_layerdrop lowerCamelCase__ : int = classifier_dropout lowerCamelCase__ : Union[str, Any] = use_cache lowerCamelCase__ : str = encoder_layers lowerCamelCase__ : str = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , forced_eos_token_id=UpperCAmelCase , **UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , UpperCAmelCase ): lowerCamelCase__ : Dict = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ 'The config can simply be saved and uploaded again to be fixed.' 
) class lowerCAmelCase ( __UpperCamelCase ): @property def A_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Any = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: lowerCamelCase__ : Tuple = {0: 'batch'} lowerCamelCase__ : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: lowerCamelCase__ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'} lowerCamelCase__ : Dict = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : int = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_layers for i in range(UpperCAmelCase ): lowerCamelCase__ : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'} lowerCamelCase__ : Optional[Any] = {0: 'batch', 2: 'past_sequence + sequence'} else: lowerCamelCase__ : Union[str, Any] = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def A_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = super().outputs else: lowerCamelCase__ : Optional[Any] = super(UpperCAmelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : List[str] = self.num_layers for i in range(UpperCAmelCase ): lowerCamelCase__ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'} lowerCamelCase__ : List[str] = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def A_ ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : List[str] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[int] = dict(**UpperCAmelCase , **UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Optional[int] = common_inputs['input_ids'].shape lowerCamelCase__ : List[str] = common_inputs['decoder_input_ids'].shape[1] lowerCamelCase__ , lowerCamelCase__ : List[str] = self.num_attention_heads lowerCamelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Dict = decoder_seq_length + 3 lowerCamelCase__ : int = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : Dict = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(UpperCAmelCase , UpperCAmelCase )] , dim=1 ) lowerCamelCase__ : Any = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers lowerCamelCase__ : List[Any] = min(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Tuple = max(UpperCAmelCase , UpperCAmelCase ) - min_num_layers lowerCamelCase__ : str = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase ), ) ) # TODO: test this. lowerCamelCase__ : List[str] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(UpperCAmelCase , UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) ) return common_inputs def A_ ( self : Tuple , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs['input_ids'].shape # Not using the same length for past_key_values lowerCamelCase__ : Union[str, Any] = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs['attention_mask'].dtype lowerCamelCase__ : int = torch.cat( [common_inputs['attention_mask'], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 ) lowerCamelCase__ : Any = [ (torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(UpperCAmelCase ) ] return common_inputs def A_ ( self : Optional[int] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCamelCase__ : Tuple = compute_effective_axis_dimension( UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(UpperCAmelCase ) lowerCamelCase__ : List[str] = compute_effective_axis_dimension( UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : str = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Dict = dict(tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase ) ) return common_inputs def A_ ( self : List[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase ) elif self.task == "causal-lm": lowerCamelCase__ : int = self._generate_dummy_inputs_for_causal_lm( UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase ) else: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase ) return common_inputs def A_ ( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : int ) -> List[Any]: if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) else: lowerCamelCase__ : str = super(UpperCAmelCase , self )._flatten_past_key_values_( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
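# Usage sketch for the two classes above (names are mangled in this dump; the
# assumption is that they mirror transformers' BartConfig and BartOnnxConfig,
# so the real library classes stand in for them here). It shows the dynamic
# ONNX axes the `inputs`/`outputs` properties produce for the "seq2seq-lm" task.
from transformers import BartConfig
from transformers.models.bart.configuration_bart import BartOnnxConfig

bart_config = BartConfig(encoder_layers=2, decoder_layers=2, d_model=64)
onnx_config = BartOnnxConfig(bart_config, task="seq2seq-lm")
print(onnx_config.inputs)   # {'input_ids': {0: 'batch', 1: 'encoder_sequence'}, ...}
print(onnx_config.outputs)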
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
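# Minimal sketch of the lazy-import pattern `_LazyModule` implements above
# (assumed, simplified semantics): nothing listed in `_import_structure` is
# imported until the attribute is first accessed, so importing the package
# stays cheap even when heavy optional backends like sentencepiece are installed.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Resolve the submodule that owns `attr` on demand, then forward to it.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")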
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __snake_case : def __init__( self : str , _snake_case : Tuple , _snake_case : List[str]=13 , _snake_case : Tuple=7 , _snake_case : Optional[Any]=True , _snake_case : List[str]=True , _snake_case : Dict=False , _snake_case : List[str]=True , _snake_case : Optional[int]=99 , _snake_case : Optional[int]=32 , _snake_case : Dict=5 , _snake_case : List[Any]=4 , _snake_case : Optional[int]=37 , _snake_case : Optional[Any]="gelu" , _snake_case : Tuple=0.1 , _snake_case : Tuple=0.1 , _snake_case : int=512 , _snake_case : int=16 , _snake_case : List[Any]=2 , _snake_case : Any=0.0_2 , _snake_case : str=3 , _snake_case : Union[str, Any]=4 , _snake_case : str=None , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length]) UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices) UpperCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self : str): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , ) def lowerCamelCase ( self : Optional[Any] , _snake_case : int 
, _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ = OpenLlamaModel(config=_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case) UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCamelCase ( self : Dict , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : Any , _snake_case : List[Any] , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , ): """simple docstring""" UpperCAmelCase_ = True UpperCAmelCase_ = OpenLlamaModel(_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , ) UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , ) UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : int , _snake_case : List[Any] , _snake_case : Any , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , ): """simple docstring""" UpperCAmelCase_ = OpenLlamaForCausalLM(config=_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCamelCase ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : str , _snake_case : Tuple , _snake_case : Tuple , _snake_case : Dict , ): """simple docstring""" UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = OpenLlamaForCausalLM(config=_snake_case) model.to(_snake_case) model.eval() # first forward pass UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , ) UpperCAmelCase_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size) UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1) UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1) UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['''hidden_states'''][0] UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['''hidden_states'''][0] # select random slice UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1]).item() UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach() 
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3)) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __snake_case ( a , a , a , unittest.TestCase ): UpperCAmelCase__ : Dict = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) UpperCAmelCase__ : int = (OpenLlamaForCausalLM,) if is_torch_available() else () UpperCAmelCase__ : Union[str, Any] = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : Any = False UpperCAmelCase__ : Dict = False def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = OpenLlamaModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37) def lowerCamelCase ( self : Tuple): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ = type self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = input_dict['''input_ids'''] UpperCAmelCase_ = input_ids.ne(1).to(_snake_case) UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) UpperCAmelCase_ = OpenLlamaForSequenceClassification(_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = '''single_label_classification''' UpperCAmelCase_ = input_dict['''input_ids'''] UpperCAmelCase_ = input_ids.ne(1).to(_snake_case) UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) UpperCAmelCase_ = OpenLlamaForSequenceClassification(_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = 
'''multi_label_classification''' UpperCAmelCase_ = input_dict['''input_ids'''] UpperCAmelCase_ = input_ids.ne(1).to(_snake_case) UpperCAmelCase_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) UpperCAmelCase_ = OpenLlamaForSequenceClassification(_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''') def lowerCamelCase ( self : List[Any]): """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)]) def lowerCamelCase ( self : List[Any] , _snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = ids_tensor([1, 10] , config.vocab_size) UpperCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase_ = OpenLlamaModel(_snake_case) original_model.to(_snake_case) original_model.eval() UpperCAmelCase_ = original_model(_snake_case).last_hidden_state UpperCAmelCase_ = original_model(_snake_case).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase_ = {'''type''': scaling_type, '''factor''': 1_0.0} UpperCAmelCase_ = OpenLlamaModel(_snake_case) scaled_model.to(_snake_case) scaled_model.eval() UpperCAmelCase_ = scaled_model(_snake_case).last_hidden_state UpperCAmelCase_ = scaled_model(_snake_case).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-5)) else: self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1e-5))
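# Standalone sketch of the cache-consistency invariant the decoder tests above
# check: a forward pass that reuses past_key_values must reproduce the logits of
# a full pass over the concatenated sequence. Assumptions: any causal LM with
# use_cache support behaves this way; the tiny checkpoint name is illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "sshleifer/tiny-gpt2"  # illustrative tiny model
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).eval()

input_ids = tokenizer("hello world", return_tensors="pt").input_ids
with torch.no_grad():
    full_logits = model(input_ids).logits[:, -1]
    past = model(input_ids[:, :-1], use_cache=True).past_key_values
    step_logits = model(input_ids[:, -1:], past_key_values=past).logits[:, -1]
assert torch.allclose(full_logits, step_logits, atol=1e-4)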
"""
Project Euler style search: find the area of the grid whose count of
axis-aligned rectangles is closest to the target (default 2,000,000).
"""
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
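# Quick check of the identity behind the search: an a x b grid contains
# T(a) * T(b) axis-aligned rectangles, where T(n) = n * (n + 1) / 2 is the n-th
# triangle number. A 3 x 2 grid holds T(3) * T(2) = 6 * 3 = 18 rectangles, so
# the grid area whose rectangle count is closest to 18 is 3 * 2 = 6.
assert solution(18) == 6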
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __lowerCamelCase : Dict = { """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""], """tokenization_perceiver""": ["""PerceiverTokenizer"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""PerceiverFeatureExtractor"""] __lowerCamelCase : str = ["""PerceiverImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PerceiverForImageClassificationConvProcessing""", """PerceiverForImageClassificationFourier""", """PerceiverForImageClassificationLearned""", """PerceiverForMaskedLM""", """PerceiverForMultimodalAutoencoding""", """PerceiverForOpticalFlow""", """PerceiverForSequenceClassification""", """PerceiverLayer""", """PerceiverModel""", """PerceiverPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular inverse of ``a`` modulo ``m`` via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
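# Quick check: the inverse of 3 modulo 11 is 4, since (3 * 4) % 11 == 1; on
# Python 3.8+ the three-argument pow builtin computes the same value.
assert find_mod_inverse(3, 11) == 4
assert pow(3, -1, 11) == 4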
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = ["""pixel_values"""] def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384} __magic_name__ : Dict = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[Any] = do_resize __magic_name__ : str = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : int = resample __magic_name__ : List[str] = do_rescale __magic_name__ : List[Any] = rescale_factor __magic_name__ : str = do_normalize __magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) __magic_name__ : Dict = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : Dict = int(shortest_edge / crop_pct ) __magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Optional[Any] = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : str = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Dict = size if size is not None else self.size __magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
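# Numeric check of the resize branch above: for shortest_edge < 384 the image
# is first resized to int(shortest_edge / crop_pct) and then center-cropped
# back to (shortest_edge, shortest_edge). With the default crop_pct = 224 / 256
# from __init__ and a requested shortest_edge of 256:
assert int(256 / (224 / 256)) == 292  # resize short side to 292, then crop to 256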
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Union[str, Any] = "facebook/bart-large-mnli" snake_case__ : str = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) snake_case__ : Tuple = "text_classifier" snake_case__ : Any = AutoTokenizer snake_case__ : Optional[Any] = AutoModelForSequenceClassification snake_case__ : Any = ["text", ["text"]] snake_case__ : Optional[int] = ["text"] def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]: super().setup() __SCREAMING_SNAKE_CASE = self.model.config __SCREAMING_SNAKE_CASE = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("entail" ): __SCREAMING_SNAKE_CASE = int(UpperCAmelCase__ ) if self.entailment_id == -1: raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = labels return self.pre_processor( [text] * len(UpperCAmelCase__ ) , [F"""This example is {label}""" for label in labels] , return_tensors="pt" , padding="max_length" , ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : List[str] ) -> Dict: __SCREAMING_SNAKE_CASE = outputs.logits __SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
from __future__ import annotations

from math import pi

# Reduced Planck constant h-bar and the speed of light c.
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit: J * s
SPEED_OF_LIGHT = 3e8  # unit: m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the idealized Casimir equation F = (h-bar * c * pi^2 * A) / (240 * d^4)
    for whichever of the three arguments is passed as 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
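# Worked example for the force branch: with area A = 1 m^2 and separation
# d = 1 um, F = (h-bar * c * pi^2 * A) / (240 * d^4) is roughly 1.3e-3 N.
_result = casimir_force(force=0, area=1.0, distance=1e-6)
assert abs(_result["force"] - 1.3e-3) / 1.3e-3 < 0.01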
'''simple docstring''' import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def __snake_case ( UpperCAmelCase_ : Dict ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ): lowerCamelCase_ = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowerCamelCase_ = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" ) lowerCamelCase_ = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" ) lowerCamelCase_ = key.replace("heads.cmd.itm_head.cls" , "itm_head" ) lowerCamelCase_ = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" ) lowerCamelCase_ = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" ) lowerCamelCase_ = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" ) lowerCamelCase_ = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" ) lowerCamelCase_ = key.replace("mm_text_projection" , "flava.text_to_mm_projection" ) lowerCamelCase_ = key.replace("mm_image_projection" , "flava.image_to_mm_projection" ) lowerCamelCase_ = key.replace("image_encoder.module" , "flava.image_model" ) lowerCamelCase_ = key.replace("text_encoder.module" , "flava.text_model" ) lowerCamelCase_ = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" ) lowerCamelCase_ = key.replace("mm_encoder.module" , "flava.multimodal_model" ) lowerCamelCase_ = key.replace("text_projection" , "flava.text_projection" ) lowerCamelCase_ = key.replace("image_projection" , "flava.image_projection" ) lowerCamelCase_ = value.float() for key, value in codebook_state_dict.items(): lowerCamelCase_ = value return upgrade @torch.no_grad() def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict=None ): if config_path is not None: lowerCamelCase_ = FlavaConfig.from_pretrained(UpperCAmelCase_ ) else: lowerCamelCase_ = FlavaConfig() lowerCamelCase_ = FlavaForPreTraining(UpperCAmelCase_ ).eval() lowerCamelCase_ = convert_dalle_checkpoint(UpperCAmelCase_ , UpperCAmelCase_ , save_checkpoint=UpperCAmelCase_ ) if os.path.exists(UpperCAmelCase_ ): lowerCamelCase_ = torch.load(UpperCAmelCase_ , map_location="cpu" ) else: lowerCamelCase_ = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location="cpu" ) lowerCamelCase_ = upgrade_state_dict(UpperCAmelCase_ , UpperCAmelCase_ ) hf_model.load_state_dict(UpperCAmelCase_ ) lowerCamelCase_ = hf_model.state_dict() lowerCamelCase_ = count_parameters(UpperCAmelCase_ ) lowerCamelCase_ = count_parameters(UpperCAmelCase_ ) + count_parameters(UpperCAmelCase_ ) assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) hf_model.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": a_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", 
default=None, type=str, help="""Path to hf config.json of model to convert""") a_ : Union[str, Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), 
('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : 
Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class a ( _lowerCamelCase , unittest.TestCase ): snake_case_ = ProphetNetTokenizer snake_case_ = False def A_ ( self : Optional[Any] ): super().setUp() snake_case_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def A_ ( self : List[Any] , lowercase_ : Tuple ): snake_case_ = '''UNwant\u00E9d,running''' snake_case_ = '''unwanted, running''' return input_text, output_text def A_ ( self : Any ): snake_case_ = self.tokenizer_class(self.vocab_file ) snake_case_ = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(lowercase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [9, 6, 7, 12, 10, 11] ) def A_ ( self : str ): snake_case_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def A_ ( self : str ): snake_case_ = BasicTokenizer(do_lower_case=lowercase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A_ ( self : Dict ): snake_case_ = BasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def A_ ( self : Any ): snake_case_ = BasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A_ ( self : str ): snake_case_ = BasicTokenizer(do_lower_case=lowercase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def A_ ( self : List[Any] ): snake_case_ = BasicTokenizer(do_lower_case=lowercase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A_ ( self : Any ): snake_case_ = BasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A_ ( self : Union[str, Any] ): snake_case_ = BasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def A_ ( self : Any ): snake_case_ = BasicTokenizer(do_lower_case=lowercase_ , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def A_ ( self : Dict ): snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] snake_case_ = {} for i, token in enumerate(lowercase_ ): snake_case_ = i snake_case_ = WordpieceTokenizer(vocab=lowercase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def A_ ( self : List[Any] ): snake_case_ = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) snake_case_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] snake_case_ = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102] snake_case_ = tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''' ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowercase_ , lowercase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def A_ ( self : Tuple ): self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def A_ ( self : int ): self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def A_ ( self : Optional[int] ): self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def A_ ( self : Optional[Any] ): snake_case_ = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) snake_case_ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase_ ) snake_case_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase_ ) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase :Union[str, Any] = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
331
0
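# A minimal sketch (not the library implementation) of the special-token
# layout the ProphetNet tokenizer test above asserts: one trailing [SEP]
# (id 102 in that vocab) after each segment. The helper name and the
# SEP_ID constant are illustrative assumptions, not taken from the file.
SEP_ID = 102


def pair_with_special_tokens(ids_a, ids_b=None):
    # Single sequence: X [SEP]; pair of sequences: A [SEP] B [SEP]
    if ids_b is None:
        return ids_a + [SEP_ID]
    return ids_a + [SEP_ID] + ids_b + [SEP_ID]


assert pair_with_special_tokens([7, 8]) == [7, 8, 102]
assert pair_with_special_tokens([7, 8], [9]) == [7, 8, 102, 9, 102]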
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 __lowerCAmelCase = update_area_of_max_square(_UpperCamelCase , col + 1 ) __lowerCAmelCase = update_area_of_max_square(row + 1 , col + 1 ) __lowerCAmelCase = update_area_of_max_square(row + 1 , _UpperCamelCase ) if mat[row][col]: __lowerCAmelCase = 1 + min([right, diagonal, down] ) __lowerCAmelCase = max(largest_square_area[0] , _UpperCamelCase ) return sub_problem_sol else: return 0 __lowerCAmelCase = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' def update_area_of_max_square_using_dp_array( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] __lowerCAmelCase = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase ) __lowerCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase ) __lowerCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase ) if mat[row][col]: __lowerCAmelCase = 1 + min([right, diagonal, down] ) __lowerCAmelCase = max(largest_square_area[0] , _UpperCamelCase ) __lowerCAmelCase = sub_problem_sol return sub_problem_sol else: return 0 __lowerCAmelCase = [0] __lowerCAmelCase = [[-1] * cols for _ in range(_UpperCamelCase )] update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase ) return largest_square_area[0] def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [[0] * (cols + 1) for _ in range(rows + 1 )] __lowerCAmelCase = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __lowerCAmelCase = dp_array[row][col + 1] __lowerCAmelCase = dp_array[row + 1][col + 1] __lowerCAmelCase = dp_array[row + 1][col] if mat[row][col] == 1: __lowerCAmelCase = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase = max(dp_array[row][col] , _UpperCamelCase ) else: __lowerCAmelCase = 0 return largest_square_area def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [0] * (cols + 1) __lowerCAmelCase = [0] * (cols + 1) __lowerCAmelCase = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __lowerCAmelCase = current_row[col + 1] __lowerCAmelCase = next_row[col + 1] __lowerCAmelCase = next_row[col] if mat[row][col] == 1: __lowerCAmelCase = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase = max(current_row[col] , _UpperCamelCase ) else: __lowerCAmelCase = 0 __lowerCAmelCase = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
57
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase :Any = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple: super().__init__(**_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]: __magic_name__ : str = {} if "candidate_labels" in kwargs: __magic_name__ : str = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: __magic_name__ : Tuple = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int: __magic_name__ : Dict = load_image(_A ) __magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) __magic_name__ : Optional[Any] = candidate_labels __magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels] __magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A ) __magic_name__ : Optional[Any] = [text_inputs] return inputs def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str: __magic_name__ : str = model_inputs.pop('candidate_labels' ) __magic_name__ : str = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , _A ): __magic_name__ : Dict = text_inputs[0] else: # Batching case. __magic_name__ : Optional[Any] = text_inputs[0][0] __magic_name__ : List[Any] = self.model(**_A , **_A ) __magic_name__ : str = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]: __magic_name__ : Tuple = model_outputs.pop('candidate_labels' ) __magic_name__ : Union[str, Any] = model_outputs['logits'][0] if self.framework == "pt": __magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 ) __magic_name__ : Tuple = probs.tolist() if not isinstance(_A , _A ): __magic_name__ : Any = [scores] elif self.framework == "tf": __magic_name__ : Any = stable_softmax(_A , axis=-1 ) __magic_name__ : Dict = probs.numpy().tolist() else: raise ValueError(F'Unsupported framework: {self.framework}' ) __magic_name__ : Union[str, Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] ) ] return result
331
0
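# Hedged sanity check for the maximal-square recurrence used above:
# dp[r][c] = 1 + min(right, diagonal, down) when mat[r][c] == 1, and the
# routines track the largest *side length* (a 2x2 block of ones gives 2,
# not 4). This standalone brute-force helper is illustrative, not part of
# the original file.
def largest_square_side_brute_force(mat):
    rows, cols = len(mat), len(mat[0])
    best = 0
    for r in range(rows):
        for c in range(cols):
            k = 1
            # Grow a k-by-k square anchored at (r, c) while it stays all ones.
            while (
                r + k <= rows
                and c + k <= cols
                and all(mat[i][j] for i in range(r, r + k) for j in range(c, c + k))
            ):
                best = max(best, k)
                k += 1
    return best


assert largest_square_side_brute_force([[1, 1], [1, 1]]) == 2
assert largest_square_side_brute_force([[1, 0], [1, 1]]) == 1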
'''simple docstring'''
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
58
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' 
__magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. 
</s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ : 
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
331
0
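# A hedged sketch of the byte-level scheme the ByT5 test ids above follow:
# each UTF-8 byte maps to byte_value + 3 (ids 0-2 are reserved for
# pad/</s>/unk), and </s> (id 1) terminates the sequence. This mirrors the
# ids asserted in the 'Unicode €.' test case; the function name is ours.
OFFSET = 3
EOS_ID = 1


def byt5_like_encode(text: str) -> list:
    return [b + OFFSET for b in text.encode("utf-8")] + [EOS_ID]


assert byt5_like_encode("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]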
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCAmelCase ( A_ ,A_ ): @register_to_config def __init__(self : Dict , snake_case__ : bool , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None ) -> List[Any]: '''simple docstring''' super().__init__() snake_case : Optional[int] = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" snake_case : Optional[int] = torch.zeros(snake_case__ , snake_case__ ) else: snake_case : Any = None snake_case : Optional[Any] = torch.nn.Parameter(snake_case__ ) class UpperCAmelCase ( A_ ): A__ : VQModel A__ : CLIPTextModel A__ : CLIPTokenizer A__ : TransformeraDModel A__ : LearnedClassifierFreeSamplingEmbeddings A__ : VQDiffusionScheduler def __init__(self : str , snake_case__ : VQModel , snake_case__ : CLIPTextModel , snake_case__ : CLIPTokenizer , snake_case__ : TransformeraDModel , snake_case__ : VQDiffusionScheduler , snake_case__ : LearnedClassifierFreeSamplingEmbeddings , ) -> List[Any]: '''simple docstring''' super().__init__() self.register_modules( vqvae=snake_case__ , transformer=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , scheduler=snake_case__ , learned_classifier_free_sampling_embeddings=snake_case__ , ) def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Any , snake_case__ : int , snake_case__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case : List[str] = len(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else 1 # get prompt text embeddings snake_case : Optional[Any] = self.tokenizer( snake_case__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) snake_case : Dict = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: snake_case : List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) snake_case : str = text_input_ids[:, : self.tokenizer.model_max_length] snake_case : str = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 snake_case : Dict = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate text embeddings for each generation per prompt snake_case : Dict = prompt_embeds.repeat_interleave(snake_case__ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: snake_case : Optional[int] = self.learned_classifier_free_sampling_embeddings.embeddings snake_case : Any = negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case__ , 1 , 1 ) else: snake_case : Optional[int] = [""] * batch_size snake_case : int = text_input_ids.shape[-1] snake_case : Any = self.tokenizer( snake_case__ , padding="max_length" , max_length=snake_case__ , truncation=snake_case__ , return_tensors="pt" , ) snake_case : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings snake_case : Optional[Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method snake_case : Dict = negative_prompt_embeds.shape[1] snake_case : Any = negative_prompt_embeds.repeat(1 , snake_case__ , 1 ) snake_case : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes snake_case : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__(self : Any , snake_case__ : Union[str, List[str]] , snake_case__ : int = 1_00 , snake_case__ : float = 5.0 , snake_case__ : float = 1.0 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): snake_case : Optional[Any] = 1 elif isinstance(snake_case__ , snake_case__ ): snake_case : int = len(snake_case__ ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}""" ) snake_case : List[str] = batch_size * num_images_per_prompt snake_case : Union[str, Any] = guidance_scale > 1.0 snake_case : Optional[int] = self._encode_prompt(snake_case__ , snake_case__ , snake_case__ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(snake_case__ )}.""" ) # get the initial completely masked latents unless the user supplied it snake_case : Tuple = (batch_size, self.transformer.num_latent_pixels) if latents is None: snake_case : Optional[int] = self.transformer.num_vector_embeds - 1 snake_case : Optional[int] = torch.full(snake_case__ , snake_case__ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) if (latents < 0).any() or (latents >= 
self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0," f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" ) snake_case : List[str] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(snake_case__ , device=self.device ) snake_case : Optional[int] = self.scheduler.timesteps.to(self.device ) snake_case : List[str] = latents for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the sample if we are doing classifier free guidance snake_case : List[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` snake_case : Tuple = self.transformer(snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ ).sample if do_classifier_free_guidance: snake_case , snake_case : Optional[Any] = model_output.chunk(2 ) snake_case : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(snake_case__ , dim=1 , keepdim=snake_case__ ) snake_case : Tuple = self.truncate(snake_case__ , snake_case__ ) # remove `log(0)`'s (`-inf`s) snake_case : List[str] = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 snake_case : int = self.scheduler.step(snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case__ , snake_case__ , snake_case__ ) snake_case : List[Any] = self.vqvae.config.vq_embed_dim snake_case : List[str] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) snake_case : List[str] = self.vqvae.quantize.get_codebook_entry(snake_case__ , shape=snake_case__ ) snake_case : Optional[Any] = self.vqvae.decode(snake_case__ , force_not_quantize=snake_case__ ).sample snake_case : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) snake_case : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case : int = self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : torch.FloatTensor , snake_case__ : float ) -> torch.FloatTensor: '''simple docstring''' snake_case , snake_case : Optional[int] = torch.sort(snake_case__ , 1 , descending=snake_case__ ) snake_case : List[Any] = torch.exp(snake_case__ ) snake_case : List[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out snake_case : Optional[Any] = torch.full_like(keep_mask[:, 0:1, :] , snake_case__ ) snake_case : List[Any] = torch.cat((all_true, keep_mask) , dim=1 ) snake_case : str = keep_mask[:, :-1, :] snake_case : Tuple = keep_mask.gather(1 , indices.argsort(1 ) ) snake_case : Any = log_p_x_0.clone() snake_case : List[Any] = -torch.inf # -inf = log(0) return rv
59
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]] __magic_name__ : Dict = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def __lowerCAmelCase ( self : List[Any] ) -> Tuple: __magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]] __magic_name__ : List[Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 ) __magic_name__ : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 ) __magic_name__ : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 ) __magic_name__ : Any = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
331
0
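# A simplified 2-D sketch of VQDiffusionPipeline.truncate above: keep the
# smallest prefix of classes (sorted by probability) whose cumulative mass
# stays below truncation_rate, always keep the most likely class, and set
# the rest to log(0) = -inf. The real pipeline carries an extra pixel
# dimension, which is dropped here for clarity.
import torch


def truncate_log_probs(log_p: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
    keep = sorted_log_p.exp().cumsum(dim=1) < truncation_rate
    # Shift right by one so the top class always survives truncation.
    keep = torch.cat([torch.ones_like(keep[:, :1]), keep[:, :-1]], dim=1)
    keep = keep.gather(1, indices.argsort(dim=1))  # back to original class order
    out = log_p.clone()
    out[~keep] = float("-inf")
    return out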
"""simple docstring""" from math import log from scipy.constants import Boltzmann, physical_constants snake_case__ : Optional[Any] = 300 # TEMPERATURE (unit = K) def _snake_case ( _snake_case : float , _snake_case : float , _snake_case : float , ): if donor_conc <= 0: raise ValueError('''Donor concentration should be positive''' ) elif acceptor_conc <= 0: raise ValueError('''Acceptor concentration should be positive''' ) elif intrinsic_conc <= 0: raise ValueError('''Intrinsic concentration should be positive''' ) elif donor_conc <= intrinsic_conc: raise ValueError( '''Donor concentration should be greater than intrinsic concentration''' ) elif acceptor_conc <= intrinsic_conc: raise ValueError( '''Acceptor concentration should be greater than intrinsic concentration''' ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
60
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowerCAmelCase :List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) lowerCAmelCase :List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Union[str, Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Tuple = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) lowerCAmelCase :Tuple = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Union[str, Any] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) lowerCAmelCase :Dict = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' lowerCAmelCase :Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' lowerCAmelCase :Any = '''''' lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ): """simple docstring""" assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): __magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( 
lowerCAmelCase : Tuple ): """simple docstring""" ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : str = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): __magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Any = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
331
0
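# Hedged worked example for the built-in-voltage formula above,
# V_bi = (k * T / q) * ln(N_d * N_a / n_i**2), with representative silicon
# numbers (N_d = N_a = 1e17 cm^-3, n_i = 1e10 cm^-3; the units cancel in
# the ratio). The constants are standard CODATA values, not read from the
# file above.
from math import log

BOLTZMANN = 1.380649e-23  # J/K
ELECTRON_VOLT = 1.602176634e-19  # J (numerically, the elementary charge in C)
TEMP = 300  # K

v_bi = BOLTZMANN * TEMP * log((1e17 * 1e17) / 1e10**2) / ELECTRON_VOLT
assert 0.82 < v_bi < 0.85  # about 0.83 V, typical for a silicon p-n junction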
"""simple docstring""" from itertools import product def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Dict = sides_number UpperCAmelCase_ : Optional[Any] = max_face_number * dice_number UpperCAmelCase_ : Optional[Any] = [0] * (max_total + 1) UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : int = range(__lowerCamelCase, max_face_number + 1 ) for dice_numbers in product(__lowerCamelCase, repeat=__lowerCamelCase ): UpperCAmelCase_ : Union[str, Any] = sum(__lowerCamelCase ) totals_frequencies[total] += 1 return totals_frequencies def __a ( ): UpperCAmelCase_ : Optional[Any] = total_frequency_distribution( sides_number=4, dice_number=9 ) UpperCAmelCase_ : Union[str, Any] = total_frequency_distribution( sides_number=6, dice_number=6 ) UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : Dict = 9 UpperCAmelCase_ : Tuple = 4 * 9 UpperCAmelCase_ : int = 6 for peter_total in range(__lowerCamelCase, max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) UpperCAmelCase_ : List[Any] = (4**9) * (6**6) UpperCAmelCase_ : List[str] = peter_wins_count / total_games_number UpperCAmelCase_ : Any = round(__lowerCamelCase, ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f"""{solution() = }""")
61
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int: __magic_name__ : str = parent __magic_name__ : List[Any] = 13 __magic_name__ : Union[str, Any] = 7 __magic_name__ : Tuple = True __magic_name__ : Dict = True __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = True __magic_name__ : int = 99 __magic_name__ : List[str] = 384 __magic_name__ : Optional[int] = 2 __magic_name__ : List[Any] = 4 __magic_name__ : int = 37 __magic_name__ : Union[str, Any] = 'gelu' __magic_name__ : Optional[int] = 0.1 __magic_name__ : str = 0.1 __magic_name__ : Optional[Any] = 512 __magic_name__ : Any = 16 __magic_name__ : Union[str, Any] = 2 __magic_name__ : Any = 0.02 __magic_name__ : List[str] = 3 __magic_name__ : Tuple = 4 __magic_name__ : List[Any] = 128 __magic_name__ : Optional[Any] = 2 __magic_name__ : List[str] = 9 __magic_name__ : str = 1 __magic_name__ : List[str] = None def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : List[str] = None if self.use_token_type_ids: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None __magic_name__ : int = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A 
: Tuple , _A : int , _A : Union[str, Any] ) -> Any: __magic_name__ : Dict = TFConvBertModel(config=_A ) __magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __magic_name__ : Any = [input_ids, input_mask] __magic_name__ : Tuple = model(_A ) __magic_name__ : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]: __magic_name__ : Dict = TFConvBertForMaskedLM(config=_A ) __magic_name__ : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple: __magic_name__ : Any = self.num_labels __magic_name__ : str = TFConvBertForSequenceClassification(config=_A ) __magic_name__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.num_choices __magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) __magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) __magic_name__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int: __magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A ) __magic_name__ : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = config_and_inputs __magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : List[str] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : Tuple = False A_ : Any = False A_ : List[Any] = False def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Optional[Any] = TFConvBertModelTester(self ) __magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : str ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> Any: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True __magic_name__ : Any = True if hasattr(_A , 'use_cache' ): __magic_name__ : List[Any] = True __magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A ) for model_class in self.all_model_classes: __magic_name__ : List[str] = self._prepare_for_class(_A , _A ) __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Tuple = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) __magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' ) __magic_name__ : Optional[int] = tf.keras.models.load_model(_A ) __magic_name__ : Optional[Any] = model(_A ) if self.is_encoder_decoder: __magic_name__ : Optional[int] = outputs['encoder_hidden_states'] __magic_name__ : Tuple = outputs['encoder_attentions'] 
else: __magic_name__ : Union[str, Any] = outputs['hidden_states'] __magic_name__ : Optional[Any] = outputs['attentions'] self.assertEqual(len(_A ) , _A ) __magic_name__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : str = True __magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A ) __magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A ) def check_decoder_attentions_output(_A : List[Any] ): __magic_name__ : Tuple = len(_A ) self.assertEqual(out_len % 2 , 0 ) __magic_name__ : Any = outputs.decoder_attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_A : int ): __magic_name__ : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = False __magic_name__ : List[str] = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) __magic_name__ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: __magic_name__ : Any = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Optional[int] = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : str = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : str = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) ) self.assertEqual(model.config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) @require_tf class _lowerCamelCase ( 
unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Tuple = model(_A )[0] __magic_name__ : str = [1, 6, 768] self.assertEqual(output.shape , _A ) __magic_name__ : Tuple = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
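# Illustrative sketch, separate from the test file above: a single forward pass
# through the same checkpoint the integration test exercises. Assumes TensorFlow
# is installed and the Hub is reachable.
from transformers import AutoTokenizer, TFConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
inputs = tokenizer("ConvBERT mixes self-attention with span-based convolution.", return_tensors="tf")
outputs = model(inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)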
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
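# Illustrative usage sketch, not part of the source file above: a PipelineTool is
# callable once instantiated. The import path and the local file name are
# assumptions for the example.
from PIL import Image
from transformers.tools import ImageCaptioningTool  # import path assumed

tool = ImageCaptioningTool()
image = Image.open("photo.jpg")  # hypothetical local image
print(tool(image))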
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
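# Illustrative sketch mirroring the unit tests above: standalone FaissIndex usage
# outside unittest. Assumes `faiss` and `datasets` are installed.
import faiss
import numpy as np
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))  # five one-hot "documents"
scores, retrieved = index.search(np.ones(5, dtype=np.float32))
print(scores[0], retrieved[0])  # best match first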
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
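# Illustrative sketch (hypothetical subclass, not part of the library): a minimal
# in-memory reader built on the abstract base above, to show the intended shape of
# a concrete implementation. Real readers (CSV, JSON, Parquet, ...) live elsewhere.
from datasets import Dataset


class InMemoryDictReader(AbstractDatasetReader):
    def __init__(self, data: dict, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.data = data

    def read(self) -> Dataset:
        # Ignores caching/streaming options for brevity.
        return Dataset.from_dict(self.data)


reader = InMemoryDictReader({"text": ["hello", "world"]})
print(reader.read())  # Dataset({features: ['text'], num_rows: 2})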
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : List[Any] = filter(lambda lowerCAmelCase : p.requires_grad , model.parameters() ) __magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ): """simple docstring""" if metric == "rouge2": __magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": __magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": __magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": __magic_name__ : int = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) __magic_name__ : List[Any] = ModelCheckpoint( dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ): """simple docstring""" return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int: __magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None: logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) __magic_name__ : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results __magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": __magic_name__ : List[Any] = od / 'test_results.txt' __magic_name__ : Dict = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt' __magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , 'a+' ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __magic_name__ : Optional[Any] = metrics[key] if isinstance(_A , torch.Tensor ): __magic_name__ : Tuple = val.item() __magic_name__ : int = F'{key}: {val:.6f}\n' writer.write(_A ) if not save_generations: return if "preds" in metrics: __magic_name__ : str = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(_A ) @rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple: try: __magic_name__ : str = pl_module.model.model.num_parameters() except AttributeError: __magic_name__ : List[str] = pl_module.model.num_parameters() __magic_name__ : List[Any] = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , 'test' ) @rank_zero_only def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule A_ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# Project Euler problem 31: count the ways an amount (in pence) can be made from
# the eight UK coin denominations, one mutually recursive counter per coin.
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
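# Quick sanity checks for the recursion above. Project Euler problem 31 reports
# 73682 ways to make £2 (200 pence) from the eight UK coin denominations.
assert solution(5) == 4  # {5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}
assert solution(200) == 73682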
def validate_initial_digits(credit_card_number: str) -> bool:
    # Valid issuers start with 34, 35, 37, 4, 5 or 6.
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # Double the value of every second digit.
        digit = int(cc_number[i])
        digit *= 2
        # If doubling results in a two-digit number, i.e. greater than 9
        # (e.g. 6 x 2 = 12), add the digits of the product (12: 1 + 2 = 3,
        # 15: 1 + 5 = 6) to get back to a single digit.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining (undoubled) digits.
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
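# Additional checks for the Luhn routine in isolation (no prefix/length rules):
# 79927398713 is the classic valid Luhn example; flipping its check digit fails.
assert luhn_validation("79927398713") is True
assert luhn_validation("79927398710") is False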
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Dict = ["""flax""", """transformers"""] def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[int] = ["""flax""", """transformers"""] def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] )
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.g4dn.xlarge""", """results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.6, """eval_loss""": 0.9}, }, { """framework""": """tensorflow""", """script""": """run_tf.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.g4dn.xlarge""", """results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 0.9}, }, ] ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , ) assert hasattr(self , """env""" ) def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple=1 ) -> Optional[int]: # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: str ) -> Any: TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) def lowerCAmelCase_ ( self: List[str] ) -> int: # create estimator snake_case_ :int = self.create_estimator() # run training estimator.fit() # result dataframe snake_case_ :List[str] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case_ :List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) snake_case_ :List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case_ :Union[str, Any] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
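# Illustrative sketch: the pipeline above is usually reached through the high-level
# factory. The image path is hypothetical; the checkpoint is the BLIP captioner
# referenced elsewhere in this repository.
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("photo.jpg"))  # e.g. [{"generated_text": "a dog sitting on a couch"}]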
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler'] __magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]: rank_zero_info('***** Validation results *****' ) __magic_name__ : str = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]: rank_zero_info('***** Test results *****' ) __magic_name__ : Optional[int] = trainer.callback_metrics # Log and save results to file __magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __magic_name__ : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: __magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase ) if logging_callback is None: __magic_name__ : Dict = LoggingCallback() __magic_name__ : List[str] = {} if args.fpaa: __magic_name__ : Dict = 16 if args.gpus > 1: __magic_name__ : Tuple = 'auto' __magic_name__ : int = 'ddp' __magic_name__ : str = args.accumulate_grad_batches __magic_name__ : str = None __magic_name__ : List[str] = 'auto' __magic_name__ : List[Any] = pl.Trainer.from_argparse_args( lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , ) if args.do_train: trainer.fit(lowerCAmelCase ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCAmelCase__ = False lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = """ybelkada/fonts""" def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ' "Pix2StructImageProcessor. Please upgrade torch." ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: int ) -> Tuple: '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) _check_torch_version() A__ = image_tensor.unsqueeze(0 ) A__ = torch.nn.functional.unfold(SCREAMING_SNAKE_CASE_ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) A__ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , -1 ) A__ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: int = 3_6 , SCREAMING_SNAKE_CASE_: str = "black" , SCREAMING_SNAKE_CASE_: str = "white" , SCREAMING_SNAKE_CASE_: int = 5 , SCREAMING_SNAKE_CASE_: int = 5 , SCREAMING_SNAKE_CASE_: int = 5 , SCREAMING_SNAKE_CASE_: int = 5 , SCREAMING_SNAKE_CASE_: Optional[bytes] = None , SCREAMING_SNAKE_CASE_: Optional[str] = None , ) -> Image.Image: '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , "vision" ) # Add new lines so that each line is no more than 80 characters. A__ = textwrap.TextWrapper(width=8_0 ) A__ = wrapper.wrap(text=SCREAMING_SNAKE_CASE_ ) A__ = "\n".join(SCREAMING_SNAKE_CASE_ ) if font_bytes is not None and font_path is None: A__ = io.BytesIO(SCREAMING_SNAKE_CASE_ ) elif font_path is not None: A__ = font_path else: A__ = hf_hub_download(SCREAMING_SNAKE_CASE_ , "Arial.TTF" ) A__ = ImageFont.truetype(SCREAMING_SNAKE_CASE_ , encoding="UTF-8" , size=SCREAMING_SNAKE_CASE_ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. A__ = ImageDraw.Draw(Image.new("RGB" , (1, 1) , SCREAMING_SNAKE_CASE_ ) ) A__ , A__ , A__ , A__ = temp_draw.textbbox((0, 0) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Create the actual image with a bit of padding around the text. 
A__ = text_width + left_padding + right_padding A__ = text_height + top_padding + bottom_padding A__ = Image.new("RGB" , (image_width, image_height) , SCREAMING_SNAKE_CASE_ ) A__ = ImageDraw.Draw(SCREAMING_SNAKE_CASE_ ) draw.text(xy=(left_padding, top_padding) , text=SCREAMING_SNAKE_CASE_ , fill=SCREAMING_SNAKE_CASE_ , font=SCREAMING_SNAKE_CASE_ ) return image def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: np.ndarray , SCREAMING_SNAKE_CASE_: str , **SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Dict: '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , "vision" ) # Convert to PIL image if necessary A__ = to_pil_image(SCREAMING_SNAKE_CASE_ ) A__ = render_text(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A__ = max(header_image.width , image.width ) A__ = int(image.height * (new_width / image.width) ) A__ = int(header_image.height * (new_width / header_image.width) ) A__ = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary A__ = to_numpy_array(SCREAMING_SNAKE_CASE_ ) if infer_channel_dimension_format(SCREAMING_SNAKE_CASE_ ) == ChannelDimension.LAST: A__ = to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , ChannelDimension.LAST ) return new_image class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = ['flattened_patches'] def __init__( self , lowercase = True , lowercase = True , lowercase = None , lowercase = 2048 , lowercase = False , **lowercase , ) -> None: '''simple docstring''' super().__init__(**lowercase ) A__ = patch_size if patch_size is not None else {"height": 16, "width": 16} A__ = do_normalize A__ = do_convert_rgb A__ = max_patches A__ = is_vqa def UpperCamelCase ( self , lowercase , lowercase , lowercase , **lowercase ) -> np.ndarray: '''simple docstring''' requires_backends(self.extract_flattened_patches , "torch" ) _check_torch_version() # convert to torch A__ = to_channel_dimension_format(lowercase , ChannelDimension.FIRST ) A__ = torch.from_numpy(lowercase ) A__ , A__ = patch_size["height"], patch_size["width"] A__ , A__ = get_image_size(lowercase ) # maximize scale s.t. A__ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) A__ = max(min(math.floor(scale * image_height / patch_height ) , lowercase ) , 1 ) A__ = max(min(math.floor(scale * image_width / patch_width ) , lowercase ) , 1 ) A__ = max(num_feasible_rows * patch_height , 1 ) A__ = max(num_feasible_cols * patch_width , 1 ) A__ = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=lowercase , antialias=lowercase , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] A__ = torch_extract_patches(lowercase , lowercase , lowercase ) A__ = patches.shape A__ = patches_shape[1] A__ = patches_shape[2] A__ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] A__ = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] A__ = torch.arange(lowercase ).reshape([rows, 1] ).repeat(1 , lowercase ).reshape([rows * columns, 1] ) A__ = torch.arange(lowercase ).reshape([1, columns] ).repeat(lowercase , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. 
# [rows * columns, 1] A__ = row_ids.to(torch.floataa ) A__ = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] A__ = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] A__ = torch.nn.functional.pad(lowercase , [0, 0, 0, max_patches - (rows * columns)] ).float() A__ = to_numpy_array(lowercase ) return result def UpperCamelCase ( self , lowercase , lowercase = None , **lowercase ) -> np.ndarray: '''simple docstring''' if image.dtype == np.uinta: A__ = image.astype(np.floataa ) # take mean across the whole `image` A__ = np.mean(lowercase ) A__ = np.std(lowercase ) A__ = max(lowercase , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(lowercase , mean=lowercase , std=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> ImageInput: '''simple docstring''' A__ = do_normalize if do_normalize is not None else self.do_normalize A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A__ = patch_size if patch_size is not None else self.patch_size A__ = max_patches if max_patches is not None else self.max_patches A__ = self.is_vqa if kwargs.get("data_format" , lowercase ) is not None: raise ValueError("data_format is not an accepted input as the outputs are " ) A__ = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # PIL RGBA images are converted to RGB if do_convert_rgb: A__ = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A__ = [to_numpy_array(lowercase ) for image in images] if is_vqa: if header_text is None: raise ValueError("A header text must be provided for VQA models." ) A__ = kwargs.pop("font_bytes" , lowercase ) A__ = kwargs.pop("font_path" , lowercase ) if isinstance(lowercase , lowercase ): A__ = [header_text] * len(lowercase ) A__ = [ render_header(lowercase , header_text[i] , font_bytes=lowercase , font_path=lowercase ) for i, image in enumerate(lowercase ) ] if do_normalize: A__ = [self.normalize(image=lowercase ) for image in images] # convert to torch tensor and permute A__ = [ self.extract_flattened_patches(image=lowercase , max_patches=lowercase , patch_size=lowercase ) for image in images ] # create attention mask in numpy A__ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] A__ = BatchFeature( data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=lowercase ) return encoded_outputs
68
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
331
0
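# Illustrative sketch of what torch_extract_patches (defined in the Pix2Struct
# image processor above) computes: torch.nn.functional.unfold cuts the image into
# non-overlapping patch_height x patch_width tiles, one flattened vector per tile.
# The shapes below are assumptions chosen for the demonstration.
import torch

image = torch.randn(1, 3, 32, 48)  # (batch, channels, height, width)
patches = torch.nn.functional.unfold(image, (16, 16), stride=(16, 16))
print(patches.shape)  # torch.Size([1, 768, 6]): 3*16*16 values per tile, (32/16)*(48/16) = 6 tiles

# Minimal sketch of the denoising loop that the DDPMScheduler test above checks
# step by step; a zero tensor stands in for a real noise-prediction model.
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample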
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import _LazyModule __UpperCamelCase = {'''tokenization_tapex''': ['''TapexTokenizer''']} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
69
'''simple docstring'''

import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
    '''simple docstring'''

    A_ : List[Any] = IFInpaintingPipeline
    A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
        return self._get_dummy_components()

    def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]:
        if str(_A ).startswith('mps' ):
            __magic_name__ : Optional[Any] = torch.manual_seed(_A )
        else:
            __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )

        __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )

        __magic_name__ : Tuple = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() ,
        reason='XFormers attention is only available with CUDA and `xformers` installed' ,
    )
    def __lowerCAmelCase ( self : List[Any] ) -> int:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def __lowerCAmelCase ( self : Dict ) -> Any:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def __lowerCAmelCase ( self : Tuple ) -> int:
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
        self._test_save_load_local()

    def __lowerCAmelCase ( self : Any ) -> int:
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,
        )
331
0
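# Simplified sketch (not transformers' actual implementation) of the lazy-import
# pattern behind the _LazyModule init above: attribute access triggers the real
# submodule import, so importing the package stays cheap.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, attr)  # imported only on first access


# Hedged end-to-end sketch of the IFInpaintingPipeline exercised by the test
# above; the checkpoint id is an assumption (the DeepFloyd IF weights are gated)
# and photo.png / mask.png are placeholder file names.
import torch
from PIL import Image
from diffusers import IFInpaintingPipeline

pipe = IFInpaintingPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
image = Image.open("photo.png").convert("RGB")
mask = Image.open("mask.png").convert("RGB")
result = pipe(prompt="A painting of a squirrel eating a burger", image=image, mask_image=mask).images[0]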
'''simple docstring'''

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


A__ : Any ='''tiny-wmt19-en-ru'''

# Build

# borrowed from a test
A__ : Optional[Any] =[
    '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''',
    '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''',
    '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''',
]
A__ : Dict =dict(zip(vocab, range(len(vocab))))
A__ : Tuple =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

with tempfile.TemporaryDirectory() as tmpdirname:
    A__ : str =Path(tmpdirname)
    A__ : Union[str, Any] =build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    A__ : Union[str, Any] =build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    A__ : str =build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))

    A__ : Any =FSMTTokenizer(
        langs=['''en''', '''ru'''],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

A__ : Optional[Any] =FSMTConfig(
    langs=['''ru''', '''en'''],
    src_vocab_size=10_00,
    tgt_vocab_size=10_00,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

A__ : List[Any] =FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")

# Test
A__ : Union[str, Any] =tokenizer(['''Making tiny model'''], return_tensors='''pt''')
A__ : List[str] =tiny_model(**batch)

print('''test output:''', len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(F"""Generated {mname_tiny}""")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
70
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
331
0
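# The script above publishes its artifact as "stas/tiny-wmt19-en-ru" (per its own
# header comment); a hedged sketch of reloading that checkpoint:
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
tiny_model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
batch = tokenizer(["Making tiny model"], return_tensors="pt")
print(tiny_model(**batch).logits.shape)

# Usage sketch mirroring the DeBERTa integration test above: run the public
# microsoft/deberta-base checkpoint on the test's fixed input ids and inspect the
# same hidden-state slice the test compares against hard-coded values.
import torch
from transformers import DebertaModel

model = DebertaModel.from_pretrained("microsoft/deberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    hidden = model(input_ids, attention_mask=attention_mask)[0]
print(hidden[:, 1:4, 1:4])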
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __A ( a ): """simple docstring""" UpperCamelCase__ : Any =["""image_processor""", """tokenizer"""] UpperCamelCase__ : List[Any] ="""LayoutLMv3ImageProcessor""" UpperCamelCase__ : Dict =("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""") def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Dict =None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCamelCase__ , ) __UpperCamelCase : Tuple =kwargs.pop('feature_extractor' ) __UpperCamelCase : Optional[int] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowerCamelCase__ , lowerCamelCase__ ) def __call__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = True , lowerCamelCase__ = None , **lowerCamelCase__ , ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' 
) # first, apply the image processor __UpperCamelCase : List[str] =self.image_processor(images=lowerCamelCase__ , return_tensors=lowerCamelCase__ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __UpperCamelCase : int =[text] # add batch dimension (as the image processor always adds a batch dimension) __UpperCamelCase : Any =features['words'] __UpperCamelCase : int =self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , ) # add pixel values __UpperCamelCase : Dict =features.pop('pixel_values' ) if return_overflowing_tokens is True: __UpperCamelCase : Union[str, Any] =self.get_overflowing_images(lowerCamelCase__ , encoded_inputs['overflow_to_sample_mapping'] ) __UpperCamelCase : List[str] =images return encoded_inputs def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Dict =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f' {len(lowerCamelCase__ )} and {len(lowerCamelCase__ )}' ) return images_with_overflow def __lowercase ( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ ) def __lowercase ( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ ) @property def __lowercase ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def __lowercase ( self ): """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCamelCase__ , ) return self.image_processor_class @property def __lowercase ( self ): """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCamelCase__ , ) return self.image_processor
71
'''simple docstring'''

class _lowerCamelCase :  # Public class to implement a graph
    '''simple docstring'''

    def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None:
        __magic_name__ : Tuple = row
        __magic_name__ : str = col
        __magic_name__ : Optional[Any] = graph

    def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None:
        # Checking all 8 elements surrounding nth element
        __magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        __magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1]
        __magic_name__ : Optional[int] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A )

    def __lowerCAmelCase ( self : int ) -> int:  # And finally, count all islands.
        __magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )]
        __magic_name__ : Any = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(_A , _A , _A )
                    count += 1
        return count
331
0
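# Hedged usage sketch of the LayoutLMv3 processor above; the checkpoint id and
# document.png are assumptions, and apply_ocr=True (the default) additionally
# requires pytesseract to be installed.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
document = Image.open("document.png").convert("RGB")
encoding = processor(document, return_tensors="pt")
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values

# De-mangled, runnable rewrite of the flood-fill island counter above (as printed,
# every method shares one obfuscated name, so the class cannot run); same logic.
def count_islands(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def flood(i: int, j: int) -> None:
        if not (0 <= i < rows and 0 <= j < cols) or visited[i][j] or not grid[i][j]:
            return
        visited[i][j] = True
        for di in (-1, 0, 1):  # all 8 neighbours
            for dj in (-1, 0, 1):
                if di or dj:
                    flood(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                flood(i, j)
                count += 1
    return count


assert count_islands([[1, 1, 0, 0], [0, 1, 0, 1]]) == 2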
"""simple docstring""" import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch lowerCAmelCase__ = '''sshleifer/bart-tiny-random''' lowerCAmelCase__ = '''patrickvonplaten/t5-tiny-random''' @require_torch class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return AutoConfig.from_pretrained(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase , *_lowerCamelCase : List[Any] = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , *_lowerCamelCase : Optional[int] = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase , *_lowerCamelCase : List[str] = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=__lowerCAmelCase ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , *_lowerCamelCase : Any = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" with self.assertRaises(__lowerCAmelCase ): create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=__lowerCAmelCase , d=__lowerCAmelCase )
72
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase :str = ['''LayoutXLMTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase :int = ['''LayoutXLMTokenizerFast''']

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
331
0
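# Sketch of the distillation helper exercised by the test above; it lives in the
# seq2seq distillation examples (make_student.py), so the import only resolves
# from that directory, and the teacher id comes straight from the test.
import tempfile

from make_student import create_student_by_copying_alternating_layers

student, *_ = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random",  # teacher checkpoint
    tempfile.mkdtemp(),            # where the student is saved
    e=1,                           # encoder layers to copy
    d=1,                           # decoder layers to copy
)
print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1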
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str=0): __lowerCamelCase : Tuple = np.random.RandomState(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowerCAmelCase ( self : List[str]): __lowerCamelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = self.get_dummy_inputs() __lowerCamelCase : Tuple = pipe(**SCREAMING_SNAKE_CASE__).images __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : Any = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def lowerCAmelCase ( self : Dict): __lowerCamelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider') __lowerCamelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = self.get_dummy_inputs() __lowerCamelCase : List[Any] = pipe(**SCREAMING_SNAKE_CASE__).images __lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : Union[str, Any] = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def lowerCAmelCase ( self : Tuple): __lowerCamelCase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider') __lowerCamelCase : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = self.get_dummy_inputs() __lowerCamelCase : Dict = pipe(**SCREAMING_SNAKE_CASE__).images __lowerCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : List[str] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def lowerCAmelCase ( self : List[str]): __lowerCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider') __lowerCamelCase : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = self.get_dummy_inputs() __lowerCamelCase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__).images __lowerCamelCase : List[str] = image[0, -3:, -3:, -1] 
assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : List[Any] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def lowerCAmelCase ( self : int): __lowerCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider') __lowerCamelCase : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = self.get_dummy_inputs() __lowerCamelCase : Optional[int] = pipe(**SCREAMING_SNAKE_CASE__).images __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : List[Any] = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def lowerCAmelCase ( self : Dict): __lowerCamelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider') __lowerCamelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs() __lowerCamelCase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__).images __lowerCamelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : List[str] = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def lowerCAmelCase ( self : Dict): __lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = self.get_dummy_inputs() __lowerCamelCase : Optional[int] = 3 * [inputs['prompt']] # forward __lowerCamelCase : List[Any] = pipe(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1] __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs() __lowerCamelCase : Union[str, Any] = 3 * [inputs.pop('prompt')] __lowerCamelCase : Tuple = pipe.tokenizer( SCREAMING_SNAKE_CASE__ ,padding='max_length' ,max_length=pipe.tokenizer.model_max_length ,truncation=SCREAMING_SNAKE_CASE__ ,return_tensors='np' ,) __lowerCamelCase : List[Any] = text_inputs['input_ids'] __lowerCamelCase : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0] __lowerCamelCase : List[Any] = prompt_embeds # forward __lowerCamelCase : Optional[int] = pipe(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4 def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = self.get_dummy_inputs() __lowerCamelCase : Dict = 3 * ['this is a negative prompt'] __lowerCamelCase : Dict = negative_prompt __lowerCamelCase : Any = 3 * [inputs['prompt']] # forward __lowerCamelCase : Dict = pipe(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = output.images[0, -3:, -3:, -1] __lowerCamelCase : Any = self.get_dummy_inputs() __lowerCamelCase : Dict = 3 * [inputs.pop('prompt')] 
__lowerCamelCase : Optional[int] = [] for p in [prompt, negative_prompt]: __lowerCamelCase : Any = pipe.tokenizer( SCREAMING_SNAKE_CASE__ ,padding='max_length' ,max_length=pipe.tokenizer.model_max_length ,truncation=SCREAMING_SNAKE_CASE__ ,return_tensors='np' ,) __lowerCamelCase : Optional[int] = text_inputs['input_ids'] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]) __lowerCamelCase , __lowerCamelCase : Optional[int] = embeds # forward __lowerCamelCase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class A_ ( unittest.TestCase ): @property def lowerCAmelCase ( self : str): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCAmelCase ( self : str): __lowerCamelCase : List[Any] = ort.SessionOptions() __lowerCamelCase : Any = False return options def lowerCAmelCase ( self : Union[str, Any]): # using the PNDM scheduler by default __lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = 'A painting of a squirrel eating a burger' np.random.seed(0) __lowerCamelCase : Dict = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=1_0 ,output_type='np') __lowerCamelCase : Union[str, Any] = output.images __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCamelCase : Optional[int] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def lowerCAmelCase ( self : str): __lowerCamelCase : Any = DDIMScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx') __lowerCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=SCREAMING_SNAKE_CASE__ ,safety_checker=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : int = 'open neural network exchange' __lowerCamelCase : Union[str, Any] = np.random.RandomState(0) __lowerCamelCase : int = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=1_0 ,generator=SCREAMING_SNAKE_CASE__ ,output_type='np') __lowerCamelCase : Union[str, Any] = output.images __lowerCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCamelCase : Union[str, Any] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : List[str] = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx') __lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=SCREAMING_SNAKE_CASE__ ,safety_checker=SCREAMING_SNAKE_CASE__ 
,feature_extractor=SCREAMING_SNAKE_CASE__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = 'open neural network exchange' __lowerCamelCase : Dict = np.random.RandomState(0) __lowerCamelCase : Optional[int] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=1_0 ,generator=SCREAMING_SNAKE_CASE__ ,output_type='np') __lowerCamelCase : Union[str, Any] = output.images __lowerCamelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCamelCase : List[str] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def lowerCAmelCase ( self : int): __lowerCamelCase : List[str] = 0 def test_callback_fn(SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : np.ndarray) -> None: __lowerCamelCase : Any = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 6_4, 6_4) __lowerCamelCase : Dict = latents[0, -3:, -3:, -1] __lowerCamelCase : Dict = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 6_4, 6_4) __lowerCamelCase : Tuple = latents[0, -3:, -3:, -1] __lowerCamelCase : Optional[int] = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3 __lowerCamelCase : int = False __lowerCamelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,safety_checker=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = 'Andromeda galaxy in a bottle' __lowerCamelCase : str = np.random.RandomState(0) pipe( prompt=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=SCREAMING_SNAKE_CASE__ ,callback=SCREAMING_SNAKE_CASE__ ,callback_steps=1 ,) assert test_callback_fn.has_been_called assert number_of_steps == 6 def lowerCAmelCase ( self : List[str]): __lowerCamelCase : Any = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,safety_checker=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) assert isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) assert pipe.safety_checker is None __lowerCamelCase : Union[str, Any] = pipe('example prompt' ,num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__) # sanity check that the pipeline still works assert pipe.safety_checker is None __lowerCamelCase : Tuple = pipe('example prompt' ,num_inference_steps=2).images[0] assert image is not None
73
'''simple docstring'''

from __future__ import annotations

from math import ceil, floor, sqrt


def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ):
    """simple docstring"""
    __magic_name__ : list[int] = [0]
    __magic_name__ : int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    __magic_name__ : int = 0
    # the area corresponding to the grid that gives the product closest to target
    __magic_name__ : int = 0
    # an estimate of b, using the quadratic formula
    __magic_name__ : float
    # the largest integer less than b_estimate
    __magic_name__ : int
    # the smallest integer greater than b_estimate
    __magic_name__ : int
    # the triangle number corresponding to b_floor
    __magic_name__ : int
    # the triangle number corresponding to b_ceil
    __magic_name__ : int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        __magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        __magic_name__ : List[Any] = floor(lowerCAmelCase )
        __magic_name__ : Dict = ceil(lowerCAmelCase )
        __magic_name__ : Any = triangle_numbers[b_floor]
        __magic_name__ : Optional[int] = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product
        ):
            __magic_name__ : Any = triangle_b_first_guess * triangle_a
            __magic_name__ : Any = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product
        ):
            __magic_name__ : List[str] = triangle_b_second_guess * triangle_a
            __magic_name__ : Optional[int] = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(F'{solution() = }')
331
0
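# Hedged sketch of the ONNX pipeline exercised above; it needs onnxruntime and
# the ONNX export of the checkpoint (revision="onnx"), exactly as in the tests.
import numpy as np
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
)
image = pipe(
    "A painting of a squirrel eating a burger",
    num_inference_steps=10,
    generator=np.random.RandomState(0),
).images[0]

# Sanity check of the identity behind the rectangle-counting solution above: an
# m x n grid contains T(m) * T(n) rectangles, with T(k) = k * (k + 1) / 2.
def count_rectangles_brute_force(m: int, n: int) -> int:
    # a w x h rectangle fits in (m - w + 1) * (n - h + 1) positions
    return sum(
        (m - w + 1) * (n - h + 1) for w in range(1, m + 1) for h in range(1, n + 1)
    )


def triangle(k: int) -> int:
    return k * (k + 1) // 2


assert count_rectangles_brute_force(3, 2) == triangle(3) * triangle(2) == 18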
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black _lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _lowercase = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: A = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir ,'schedulers/' ) ) A = self.diffusers_dir shutil.copy( os.path.join(A_ ,'src/diffusers/schedulers/scheduling_ddpm.py' ) ,os.path.join(self.diffusers_dir ,'schedulers/scheduling_ddpm.py' ) ,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str]=None ) -> List[str]: A = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: A = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result A = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 ) A = black.format_str(A_ ,mode=A_ ) A = os.path.join(self.diffusers_dir ,'new_code.py' ) with open(A_ ,'w' ,newline='\n' ) as f: f.write(A_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name ,overwrite=A_ ) with open(A_ ,'r' ) as f: self.assertTrue(f.read() ,A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: # Base copy consistency self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,REFERENCE_CODE + '\n' ,) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,A_ ,) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,re.sub('DDPM' ,'Test' ,A_ ) ,) # Copy consistency with a really long name A = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' ,F'{long_class_name}SchedulerOutput' ,re.sub('Bert' ,A_ ,A_ ) ,) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from 
diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,A_ ,overwrite_result=re.sub('DDPM' ,'Test' ,A_ ) ,)
74
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase :Dict = ['''XGLMTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase :Tuple = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase :int = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase :Any = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
331
0
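# Illustration (class names hypothetical) of the `# Copied from` convention the
# check_copies test above enforces: the tool locates the referenced object,
# applies the declared renames (here DDPM -> Test), and flags any drift between
# the marked block and the result, optionally rewriting it in place.
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#   class TestSchedulerOutput(BaseOutput):
#       ...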
'''simple docstring''' import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml a_ : int = logging.get_logger(__name__) def a_ ( __snake_case : bool , __snake_case : bool ) -> List[str]: """simple docstring""" def run_func(__snake_case : List[str] ): @wraps(__snake_case ) def run_in_eager_mode(*__snake_case : Tuple , **__snake_case : Tuple ): return func(*__snake_case , **__snake_case ) @wraps(__snake_case ) @tf.function(experimental_compile=__snake_case ) def run_in_graph_mode(*__snake_case : int , **__snake_case : Union[str, Any] ): return func(*__snake_case , **__snake_case ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def a_ ( __snake_case : int , __snake_case : int , __snake_case : int ) -> ["tf.Tensor"]: """simple docstring""" lowerCamelCase_ =random.Random() lowerCamelCase_ =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(__snake_case , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : TensorFlowBenchmarkArguments lowercase : PretrainedConfig lowercase : str ="TensorFlow" @property def lowercase__ ( self ): """simple docstring""" return tf.__version__ def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase_ =self._prepare_inference_func(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) return self._measure_speed(_inference ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase_ =self._prepare_train_func(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) return self._measure_speed(_train ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], lowerCAmelCase ) lowerCamelCase_ =self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase_ =self._prepare_inference_func(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) return self._measure_memory(_inference ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], lowerCAmelCase ) lowerCamelCase_ =self.args.strategy if strategy is None: raise 
ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase_ =self._prepare_train_func(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) return self._measure_memory(_train ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) lowerCamelCase_ =( hasattr(lowerCAmelCase, '''architectures''' ) and isinstance(config.architectures, lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase_ ='''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase_ =__import__('''transformers''', fromlist=[model_class] ) lowerCamelCase_ =getattr(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =model_cls(lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: lowerCamelCase_ =TF_MODEL_MAPPING[config.__class__](lowerCAmelCase ) # encoder-decoder has vocab size saved differently lowerCamelCase_ =config.vocab_size if hasattr(lowerCAmelCase, '''vocab_size''' ) else config.encoder.vocab_size lowerCamelCase_ =random_input_ids(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCAmelCase, decoder_input_ids=lowerCAmelCase, training=lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(lowerCAmelCase, training=lowerCAmelCase ) lowerCamelCase_ =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' ) if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) lowerCamelCase_ =( hasattr(lowerCAmelCase, '''architectures''' ) and isinstance(config.architectures, lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase_ ='''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase_ =__import__('''transformers''', fromlist=[model_class] ) lowerCamelCase_ =getattr(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =model_cls(lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: lowerCamelCase_ =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCAmelCase ) # encoder-decoder has vocab size saved differently lowerCamelCase_ =config.vocab_size if hasattr(lowerCAmelCase, '''vocab_size''' ) else config.encoder.vocab_size lowerCamelCase_ =random_input_ids(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): lowerCamelCase_ =model(lowerCAmelCase, decoder_input_ids=lowerCAmelCase, labels=lowerCAmelCase, training=lowerCAmelCase )[0] lowerCamelCase_ =tf.gradients(lowerCAmelCase, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase, training=lowerCAmelCase )[0] lowerCamelCase_ =tf.gradients(lowerCAmelCase, model.trainable_variables ) return gradients lowerCamelCase_ =encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' ) timeit.repeat(lowerCAmelCase, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowerCamelCase_ =timeit.repeat( lowerCAmelCase, repeat=self.args.repeat, number=10, ) return min(lowerCAmelCase ) / 1_0.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''' ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''' ) lowerCamelCase_ =start_memory_tracing('''transformers''' ) if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''' ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''' ) lowerCamelCase_ ='''N/A''' else: logger.info( '''Measuring total GPU usage on GPU device. 
Make sure to not have additional processes''' ''' running on the same GPU.''' ) # init nvml nvml.nvmlInit() func() lowerCamelCase_ =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowerCamelCase_ =nvml.nvmlDeviceGetMemoryInfo(lowerCAmelCase ) lowerCamelCase_ =meminfo.used lowerCamelCase_ =Memory(lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''' ) lowerCamelCase_ =None else: lowerCamelCase_ =measure_peak_memory_cpu(lowerCAmelCase ) lowerCamelCase_ =Memory(lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: lowerCamelCase_ =stop_memory_tracing(lowerCAmelCase ) if memory is None: lowerCamelCase_ =summary.total else: lowerCamelCase_ =None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
75
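A minimal sketch of the speed-measurement strategy in _measure_speed above: as the timeit docs note, the minimum over repeats is the most stable estimate, since larger values reflect interference from other processes rather than the code itself. The warm-up call and the toy workload below are assumptions for illustration.

import timeit


def measure_speed(func, repeat: int = 3) -> float:
    func()  # warm-up, e.g. to let tf.function trace and compile first
    # min() rather than mean(): larger timings are noise, not signal
    runtimes = timeit.repeat(func, repeat=repeat, number=10)
    return min(runtimes) / 10.0  # seconds per single call, best run


print(measure_speed(lambda: sum(range(10_000))))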
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = ["""pixel_values"""] def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384} __magic_name__ : Dict = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[Any] = do_resize __magic_name__ : str = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : int = resample __magic_name__ : List[str] = do_rescale __magic_name__ : List[Any] = rescale_factor __magic_name__ : str = do_normalize __magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) __magic_name__ : Dict = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : Dict = int(shortest_edge / crop_pct ) __magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Optional[Any] = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : str = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Dict = size if size is not None else self.size __magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
331
0
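A worked example of the crop_pct branch in the resize method above, with assumed values: below the 384 threshold, the short side is first scaled up by 1/crop_pct and the image is then center-cropped back to the requested size, preserving aspect ratio.

shortest_edge = 224
crop_pct = 224 / 256  # the processor's backward-compatibility default

# resize the short side to shortest_edge / crop_pct ...
resize_target = int(shortest_edge / crop_pct)
# ... then center-crop to (shortest_edge, shortest_edge)
crop_target = (shortest_edge, shortest_edge)

assert resize_target == 256
print(resize_target, crop_target)  # 256 (224, 224)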
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): a_ = yaml.safe_load( '\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n' ) a_ = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Extra Ignored Subsection', 'text': '', 'is_empty_text': True, 'subsections': [], } ], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } a_ = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = ( 'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.' 
) a_ = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = ( 'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.' ) a_ = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.' a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).' a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n' a_ = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.' a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n' a_ = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.' a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n' a_ = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.' a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.' a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n' a_ = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.' 
a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.' a_ = '' a_ = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.' a_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' a_ = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.' @pytest.mark.parametrize( "readme_md, expected_dict" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase__ ( _a , _a): assert ReadMe.from_string(_a , _a).to_dict() == expected_dict @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase__ ( _a , _a): with pytest.raises(_a , match=re.escape(expected_error.format(path="root"))): SCREAMING_SNAKE_CASE : str = ReadMe.from_string(_a , _a) readme.validate() @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase__ ( _a , _a): with pytest.raises(_a , match=re.escape(expected_error.format(path="root"))): ReadMe.from_string(_a , _a) @pytest.mark.parametrize( "readme_md," , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase__ ( _a): ReadMe.from_string(_a , _a , suppress_parsing_errors=_a) @pytest.mark.parametrize( "readme_md, expected_dict" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase__ ( _a , _a): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE : Optional[int] = Path(_a) / "README.md" with open(_a , "w+") as readme_file: readme_file.write(_a) SCREAMING_SNAKE_CASE : Optional[Any] = ReadMe.from_readme(_a , _a).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, 
EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase__ ( _a , _a): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE : List[str] = Path(_a) / "README.md" with open(_a , "w+") as readme_file: readme_file.write(_a) SCREAMING_SNAKE_CASE : Optional[Any] = expected_error.format(path=_a) with pytest.raises(_a , match=re.escape(_a)): SCREAMING_SNAKE_CASE : Optional[int] = ReadMe.from_readme(_a , _a) readme.validate() @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase__ ( _a , _a): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE : Any = Path(_a) / "README.md" with open(_a , "w+") as readme_file: readme_file.write(_a) SCREAMING_SNAKE_CASE : Dict = expected_error.format(path=_a) with pytest.raises(_a , match=re.escape(_a)): ReadMe.from_readme(_a , _a) @pytest.mark.parametrize( "readme_md," , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase__ ( _a): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE : int = Path(_a) / "README.md" with open(_a , "w+") as readme_file: readme_file.write(_a) ReadMe.from_readme(_a , _a , suppress_parsing_errors=_a)
76
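The tests above lean on one pytest idiom worth isolating: the `match=` argument is interpreted as a regex, so expected messages must pass through re.escape. A self-contained sketch with an assumed toy validator standing in for ReadMe.validate():

import re

import pytest


def validate(readme_md: str) -> None:
    # stand-in for the real validator, which checks far more structure
    if "---" not in readme_md:
        raise ValueError("No YAML markers are present in the README.")


def test_missing_yaml_markers():
    with pytest.raises(ValueError, match=re.escape("No YAML markers are present in the README.")):
        validate("# Dataset Card for X")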
'''simple docstring'''
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
331
0
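A worked numeric example of the formula implemented above, F = ħcπ²A / (240 d⁴), with illustrative inputs (4 m² plates, 0.03 m apart):

from math import pi

REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # J * s
SPEED_OF_LIGHT = 3e8  # m * s^-1

area, distance = 4.0, 0.03  # assumed example values
force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * distance**4)
print(f"{force:.4e} N")  # ~6.4248e-21 N, which is why the effect only matters at micro scales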
"""simple docstring""" def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' return abs(_lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , _lowerCAmelCase ) def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' while y: # --> when y=0 then loop will terminate and return x as final GCD. lowercase__ , lowercase__ : Optional[int] = y, x % y return abs(_lowerCAmelCase ) def a_ ( ): '''simple docstring''' try: lowercase__ : int = input('Enter two integers separated by comma (,): ' ).split(',' ) lowercase__ : Tuple = int(nums[0] ) lowercase__ : str = int(nums[1] ) print( f"""greatest_common_divisor({num_a}, {num_a}) = """ f"""{greatest_common_divisor(_lowerCAmelCase , _lowerCAmelCase )}""" ) print(f"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(_lowerCAmelCase , _lowerCAmelCase )}""" ) except (IndexError, UnboundLocalError, ValueError): print('Wrong input' ) if __name__ == "__main__": main()
77
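A quick sanity check that the two variants above agree (definitions repeated so the snippet runs standalone); both normalize signs via abs(), so negative inputs are handled too.

def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:
        x, y = y, x % y
    return abs(x)


for pair in [(3, 5), (6, 3), (0, 7), (-12, 18)]:
    assert greatest_common_divisor(*pair) == gcd_by_iterative(*pair)
print("recursive and iterative variants agree")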
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), 
('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : 
Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
331
0
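generate_random_hand() above scores outcomes with a compact boolean-sum trick; isolated below with assumed indices into the weakest-to-strongest SORTED_HANDS ordering, (play >= oppo) + (play > oppo) evaluates to 0, 1 or 2 and indexes Loss/Tie/Win directly.

def expected_outcome(play: int, oppo: int) -> str:
    # two bools sum to 0 (both False), 1 (equal hands) or 2 (both True)
    return ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]


assert expected_outcome(1, 5) == 'Loss'  # 0 + 0
assert expected_outcome(5, 5) == 'Tie'   # 1 + 0
assert expected_outcome(7, 2) == 'Win'   # 1 + 1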
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging snake_case_ = """\ """ snake_case_ = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ snake_case_ = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): """simple docstring""" def UpperCAmelCase__ ( self :str ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def UpperCAmelCase__ ( self :str , lowercase_ :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :int = 16 , lowercase_ :bool = True , lowercase_ :Any=None ) -> Optional[int]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase = 'cuda' else: UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu' UpperCAmelCase = AutoModelForCausalLM.from_pretrained(lowercase_ ) UpperCAmelCase = model.to(lowercase_ ) UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(lowercase_ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase = model.config.max_length - 1 else: UpperCAmelCase = model.config.max_length UpperCAmelCase = tokenizer( lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors='pt' , return_attention_mask=lowercase_ , ).to(lowercase_ ) UpperCAmelCase = encodings['input_ids'] UpperCAmelCase = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase = [] UpperCAmelCase = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(lowercase_ ) , lowercase_ ) ): UpperCAmelCase = min(start_index + batch_size , len(lowercase_ ) ) UpperCAmelCase = encoded_texts[start_index:end_index] UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowercase_ ) UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(lowercase_ ), attn_mask] , dim=1 ) UpperCAmelCase = encoded_batch with torch.no_grad(): UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ ).logits UpperCAmelCase = out_logits[..., :-1, :].contiguous() UpperCAmelCase = labels[..., 1:].contiguous() UpperCAmelCase = attn_mask[..., 1:].contiguous() UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , lowercase_ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(lowercase_ )}
78
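The core of the metric above is PPL = exp(mean per-token negative log-likelihood). A minimal sketch with made-up tensors (real inputs come from a tokenizer and a causal LM, and the real metric also weights by the attention mask to skip padding):

import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 5, 100)  # (batch, seq_len, vocab) — illustrative only
labels = torch.randint(0, 100, (1, 5))  # target token ids

shift_logits = logits[..., :-1, :]  # position t predicts token t+1
shift_labels = labels[..., 1:]
loss_fct = CrossEntropyLoss(reduction="none")
nll = loss_fct(shift_logits.transpose(1, 2), shift_labels)  # (batch, seq_len - 1)
perplexity = torch.exp(nll.mean(dim=1))  # exponentiated average NLL
print(perplexity)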
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
331
0
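The file above repeats the optional-backend guard that gates each heavyweight export. A stripped-down sketch of that pattern (module and class names below are illustrative, not transformers' internals):

import importlib.util


def is_torch_available() -> bool:
    # cheap probe: does the import machinery know about the package?
    return importlib.util.find_spec("torch") is not None


_import_structure = {"configuration_x": ["XConfig"]}
if is_torch_available():
    _import_structure["modeling_x"] = ["XModel"]  # torch-only exports
print(_import_structure)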
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , 
train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
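# --- illustration (not part of the script above) ---
# A minimal, hedged sketch of the SimMIM-style mask generator the script consumes
# via `MaskGenerator(input_size, mask_patch_size, model_patch_size, mask_ratio)`.
# The class name and internals below are illustrative assumptions, not the
# script's actual definition: each call samples a random patch mask and upsamples
# it from mask-patch resolution to model-patch resolution.
import numpy as np


class MaskGeneratorSketch:
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        if input_size % mask_patch_size != 0 or mask_patch_size % model_patch_size != 0:
            raise ValueError("input_size and patch sizes must be divisible")
        self.rand_size = input_size // mask_patch_size  # side length of the maskable grid
        self.scale = mask_patch_size // model_patch_size  # upsampling factor
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * mask_ratio))

    def __call__(self):
        # pick `mask_count` patches uniformly at random ...
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        # ... then repeat each patch decision over the finer model-patch grid
        mask = mask.reshape(self.rand_size, self.rand_size)
        return mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)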
'''simple docstring'''
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')

        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
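# --- illustration (not part of the pipeline module above) ---
# A hedged usage sketch: the checkpoint id, image path and labels are example
# assumptions; "zero-shot-image-classification" is the pipeline task this class
# serves.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "cat.png",  # placeholder path to any local image
    candidate_labels=["a photo of a cat", "a photo of a dog"],
    hypothesis_template="This is a photo of {}.",
)
# `predictions` is a list of {"score": ..., "label": ...} dicts sorted by score.
print(predictions)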
'''simple docstring''' import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput a__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name def _UpperCamelCase ( __A ) -> List[Any]: '''simple docstring''' warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead" , __A , ) if isinstance(__A , torch.Tensor ): return image elif isinstance(__A , PIL.Image.Image ): UpperCamelCase__ = [image] if isinstance(image[0] , PIL.Image.Image ): UpperCamelCase__ , UpperCamelCase__ = image[0].size UpperCamelCase__ , UpperCamelCase__ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 UpperCamelCase__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] UpperCamelCase__ = np.concatenate(__A , axis=0 ) UpperCamelCase__ = np.array(__A ).astype(np.floataa ) / 255.0 UpperCamelCase__ = image.transpose(0 , 3 , 1 , 2 ) UpperCamelCase__ = 2.0 * image - 1.0 UpperCamelCase__ = torch.from_numpy(__A ) elif isinstance(image[0] , torch.Tensor ): UpperCamelCase__ = torch.cat(__A , dim=0 ) return image def _UpperCamelCase ( __A ) -> Optional[int]: '''simple docstring''' if isinstance(__A , torch.Tensor ): return mask elif isinstance(__A , PIL.Image.Image ): UpperCamelCase__ = [mask] if isinstance(mask[0] , PIL.Image.Image ): UpperCamelCase__ , UpperCamelCase__ = mask[0].size UpperCamelCase__ , UpperCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase__ = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] UpperCamelCase__ = np.concatenate(__A , axis=0 ) UpperCamelCase__ = mask.astype(np.floataa ) / 255.0 UpperCamelCase__ = 0 UpperCamelCase__ = 1 UpperCamelCase__ = torch.from_numpy(__A ) elif isinstance(mask[0] , torch.Tensor ): UpperCamelCase__ = torch.cat(__A , dim=0 ) return mask class lowercase_ ( a__ ): __UpperCAmelCase = 42 __UpperCAmelCase = 42 def __init__( self , a , a ): super().__init__() self.register_modules(unet=a , scheduler=a ) @torch.no_grad() def __call__( self , a , a , a = 2_50 , a = 0.0 , a = 10 , a = 10 , a = None , a = "pil" , a = True , ): UpperCamelCase__ = image UpperCamelCase__ = _preprocess_image(a ) UpperCamelCase__ = original_image.to(device=self.device , dtype=self.unet.dtype ) UpperCamelCase__ = _preprocess_mask(a ) UpperCamelCase__ = mask_image.to(device=self.device , dtype=self.unet.dtype ) UpperCamelCase__ = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(a , a ) and len(a ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(a )}, but requested an effective batch''' f''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''' ) UpperCamelCase__ = original_image.shape UpperCamelCase__ = randn_tensor(a , generator=a , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(a , a , a , self.device ) UpperCamelCase__ = eta UpperCamelCase__ = self.scheduler.timesteps[0] + 1 UpperCamelCase__ = generator[0] if isinstance(a , a ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual UpperCamelCase__ = self.unet(a , a ).sample # compute previous image: x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(a , a , a , a , a , a ).prev_sample else: # compute the reverse: x_t-1 -> x_t UpperCamelCase__ = self.scheduler.undo_step(a , a , a ) UpperCamelCase__ = t UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(a ) if not return_dict: return (image,) return ImagePipelineOutput(images=a )
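# --- illustration (not part of the pipeline module above) ---
# A hedged usage sketch of the RePaint pipeline; the checkpoint id is the one
# commonly paired with RePaint, and the image/mask paths are placeholders. The
# keyword names mirror the __call__ signature above.
import torch
from PIL import Image
from diffusers import RePaintPipeline, RePaintScheduler

original_image = Image.open("original.png")  # placeholder 256x256 input
mask_image = Image.open("mask.png")          # placeholder mask image

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)

generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
    image=original_image,
    mask_image=mask_image,
    num_inference_steps=250,
    eta=0.0,
    jump_length=10,
    jump_n_sample=10,
    generator=generator,
)
output.images[0].save("inpainted.png")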
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' 
__magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. 
</s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ : 
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
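# --- illustration (not part of the test file above) ---
# The expected ids hard-coded in the tests follow ByT5's byte-level scheme: each
# UTF-8 byte maps to byte_value + 3 (ids 0-2 are reserved for pad/eos/unk) and 1
# is the </s> token appended at the end. A quick check against the "Unicode €."
# fixture used above:
expected_body = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49]
assert expected_body == [b + 3 for b in "Unicode €.".encode("utf-8")]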
"""simple docstring""" def _A ( lowercase , lowercase ): """simple docstring""" assert x is not None assert y is not None a =len(lowercase ) a =len(lowercase ) # declaring the array for storing the dp values a =[[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741 for i in range(1 , m + 1 ): for j in range(1 , n + 1 ): a =1 if x[i - 1] == y[j - 1] else 0 a =max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match ) a ='''''' a , a =m, n while i > 0 and j > 0: a =1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: a =x[i - 1] + seq i -= 1 j -= 1 elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 return l[m][n], seq if __name__ == "__main__": lowerCamelCase_ : List[Any] = """AGGTAB""" lowerCamelCase_ : str = """GXTXAYB""" lowerCamelCase_ : Any = 4 lowerCamelCase_ : List[Any] = """GTAB""" lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = longest_common_subsequence(a, b) print("""len =""", ln, """, sub-sequence =""", subseq) import doctest doctest.testmod()
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
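# --- illustration (not part of the test file above) ---
# A hedged sketch of how DisjunctiveConstraint is typically consumed: passed to
# `model.generate(constraints=...)` so that at least one of several token
# sequences must appear in the output (constrained beam search needs
# num_beams > 1). The checkpoint and phrasing are example assumptions.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# force the output to contain either "rained" or "raining"
word_ids = [tokenizer(word, add_special_tokens=False).input_ids for word in ["rained", "raining"]]
constraint = DisjunctiveConstraint(word_ids)

inputs = tokenizer("summarize: It rained heavily all day and the streets flooded.", return_tensors="pt")
generated = model.generate(**inputs, constraints=[constraint], num_beams=4)
print(tokenizer.decode(generated[0], skip_special_tokens=True))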
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) A__ = logging.get_logger(__name__) # pylint: disable=invalid-name A__ = """ Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior.to(\"cuda\") >>> prompt = \"A red cartoon frog, 4k\" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16 ... ) >>> pipe.to(\"cuda\") >>> init_image = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/frog.png\" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save(\"red_frog.png\") ``` """ def _UpperCAmelCase ( snake_case , snake_case , snake_case=8 ): """simple docstring""" _lowerCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _lowerCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _UpperCAmelCase ( snake_case , snake_case=5_12 , snake_case=5_12 ): """simple docstring""" _lowerCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) _lowerCAmelCase = np.array(pil_image.convert("""RGB""" ) ) _lowerCAmelCase = arr.astype(np.floataa ) / 127.5 - 1 _lowerCAmelCase = np.transpose(snake_case , [2, 0, 1] ) _lowerCAmelCase = torch.from_numpy(snake_case ).unsqueeze(0 ) return image class __lowerCAmelCase ( lowerCamelCase__ ): def __init__( self , _snake_case , _snake_case , _snake_case , ): """simple docstring""" super().__init__() self.register_modules( unet=_snake_case , scheduler=_snake_case , movq=_snake_case , ) _lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = min(int(num_inference_steps * strength ) , _snake_case ) _lowerCAmelCase = max(num_inference_steps - init_timestep , 0 ) _lowerCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None ): """simple docstring""" if not isinstance(_snake_case , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_snake_case )}' ) _lowerCAmelCase = image.to(device=_snake_case , dtype=_snake_case ) _lowerCAmelCase = batch_size * num_images_per_prompt if image.shape[1] == 4: _lowerCAmelCase = image else: if isinstance(_snake_case , _snake_case ) and len(_snake_case ) != batch_size: raise ValueError( F'You have passed a list of generators of 
length {len(_snake_case )}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) elif isinstance(_snake_case , _snake_case ): _lowerCAmelCase = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_snake_case ) ] _lowerCAmelCase = torch.cat(_snake_case , dim=0 ) else: _lowerCAmelCase = self.movq.encode(_snake_case ).latent_dist.sample(_snake_case ) _lowerCAmelCase = self.movq.config.scaling_factor * init_latents _lowerCAmelCase = torch.cat([init_latents] , dim=0 ) _lowerCAmelCase = init_latents.shape _lowerCAmelCase = randn_tensor(_snake_case , generator=_snake_case , device=_snake_case , dtype=_snake_case ) # get latents _lowerCAmelCase = self.scheduler.add_noise(_snake_case , _snake_case , _snake_case ) _lowerCAmelCase = init_latents return latents def snake_case ( self , _snake_case=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _lowerCAmelCase = torch.device(F'cuda:{gpu_id}' ) _lowerCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_snake_case , _snake_case ) def snake_case ( self , _snake_case=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) _lowerCAmelCase = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=_snake_case ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _lowerCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: _lowerCAmelCase , _lowerCAmelCase = cpu_offload_with_hook(_snake_case , _snake_case , prev_module_hook=_snake_case ) # We'll offload the last model manually. 
_lowerCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case ( self ): """simple docstring""" if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(_snake_case , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_snake_case ) def __call__( self , _snake_case , _snake_case , _snake_case , _snake_case = 512 , _snake_case = 512 , _snake_case = 100 , _snake_case = 4.0 , _snake_case = 0.3 , _snake_case = 1 , _snake_case = None , _snake_case = "pil" , _snake_case = True , ): """simple docstring""" _lowerCAmelCase = self._execution_device _lowerCAmelCase = guidance_scale > 1.0 if isinstance(_snake_case , _snake_case ): _lowerCAmelCase = torch.cat(_snake_case , dim=0 ) _lowerCAmelCase = image_embeds.shape[0] if isinstance(_snake_case , _snake_case ): _lowerCAmelCase = torch.cat(_snake_case , dim=0 ) if do_classifier_free_guidance: _lowerCAmelCase = image_embeds.repeat_interleave(_snake_case , dim=0 ) _lowerCAmelCase = negative_image_embeds.repeat_interleave(_snake_case , dim=0 ) _lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_snake_case ) if not isinstance(_snake_case , _snake_case ): _lowerCAmelCase = [image] if not all(isinstance(_snake_case , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F'Input is in incorrect format: {[type(_snake_case ) for i in image]}. Currently, we only support PIL image and pytorch tensor' ) _lowerCAmelCase = torch.cat([prepare_image(_snake_case , _snake_case , _snake_case ) for i in image] , dim=0 ) _lowerCAmelCase = image.to(dtype=image_embeds.dtype , device=_snake_case ) _lowerCAmelCase = self.movq.encode(_snake_case )["""latents"""] _lowerCAmelCase = latents.repeat_interleave(_snake_case , dim=0 ) self.scheduler.set_timesteps(_snake_case , device=_snake_case ) _lowerCAmelCase , _lowerCAmelCase = self.get_timesteps(_snake_case , _snake_case , _snake_case ) _lowerCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt ) _lowerCAmelCase , _lowerCAmelCase = downscale_height_and_width(_snake_case , _snake_case , self.movq_scale_factor ) _lowerCAmelCase = self.prepare_latents( _snake_case , _snake_case , _snake_case , _snake_case , image_embeds.dtype , _snake_case , _snake_case ) for i, t in enumerate(self.progress_bar(_snake_case ) ): # expand the latents if we are doing classifier free guidance _lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _lowerCAmelCase = {"""image_embeds""": image_embeds} _lowerCAmelCase = self.unet( sample=_snake_case , timestep=_snake_case , encoder_hidden_states=_snake_case , added_cond_kwargs=_snake_case , return_dict=_snake_case , )[0] if do_classifier_free_guidance: _lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) _lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 ) _lowerCAmelCase , _lowerCAmelCase = variance_pred.chunk(2 ) _lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): 
_lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase = self.scheduler.step( _snake_case , _snake_case , _snake_case , generator=_snake_case , )[0] # post-processing _lowerCAmelCase = self.movq.decode(_snake_case , force_not_quantize=_snake_case )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: _lowerCAmelCase = image * 0.5 + 0.5 _lowerCAmelCase = image.clamp(0 , 1 ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _lowerCAmelCase = self.numpy_to_pil(_snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=_snake_case )
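# --- illustration (not part of the pipeline module above) ---
# A hedged trace of get_timesteps() above using the settings from the module
# docstring example (num_inference_steps=100, strength=0.2): only the final 20%
# of the schedule is actually denoised, so the input image is lightly noised and
# then refined rather than regenerated from scratch.
num_inference_steps, strength = 100, 0.2
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
assert (init_timestep, t_start) == (20, 80)  # denoise over the last 20 steps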
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowerCAmelCase :List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) lowerCAmelCase :List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Union[str, Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Tuple = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) lowerCAmelCase :Tuple = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Union[str, Any] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) lowerCAmelCase :Dict = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' lowerCAmelCase :Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' lowerCAmelCase :Any = '''''' lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ): """simple docstring""" assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): __magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( 
lowerCAmelCase : Tuple ): """simple docstring""" ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : str = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): __magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Any = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
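# --- illustration (not part of the test file above) ---
# A hedged sketch of the direct call the parametrized tests above exercise:
# build a ReadMe from a card string plus the structure dict loaded at the top of
# this file (named `example_yaml_structure` upstream) and validate it; a correct
# fixture passes silently, while a malformed one raises with the messages shown.
readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
readme.validate()  # no exception expected for the correct fixture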
'''simple docstring''' import os import jsonlines import numpy as np from tqdm import tqdm snake_case_ : Optional[Any] = 2048 snake_case_ : Union[str, Any] = 4096 snake_case_ : int = 42 snake_case_ : Any = os.environ.pop('PROCESS_TRAIN', 'false') snake_case_ : Dict = {'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4} def A__ ( UpperCAmelCase_ ): def choose_first(UpperCAmelCase_ , UpperCAmelCase_=False ): assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) if len(UpperCAmelCase_ ) == 1: _UpperCamelCase : Any = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: _UpperCamelCase : Optional[Any] = {k: [a[k]] for k in a} if len(a['start_token'] ) > 0: break return a _UpperCamelCase : Optional[int] = {'id': example['id']} _UpperCamelCase : Any = example['annotations'] _UpperCamelCase : int = annotation['yes_no_answer'] if 0 in yes_no_answer or 1 in yes_no_answer: _UpperCamelCase : Union[str, Any] = ['yes'] if 1 in yes_no_answer else ['no'] _UpperCamelCase : List[Any] = [] _UpperCamelCase : Optional[int] = [] _UpperCamelCase : Any = ['<cls>'] else: _UpperCamelCase : List[str] = ['short'] _UpperCamelCase : List[Any] = choose_first(annotation['short_answers'] ) if len(out['start_token'] ) == 0: # answer will be long if short is not available _UpperCamelCase : str = ['long'] _UpperCamelCase : Dict = choose_first(annotation['long_answer'] , is_long_answer=UpperCAmelCase_ ) _UpperCamelCase : List[str] = [] answer.update(UpperCAmelCase_ ) # disregard some samples if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]: _UpperCamelCase : List[str] = True else: _UpperCamelCase : str = False _UpperCamelCase : List[str] = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text'] if not all(isinstance(answer[k] , UpperCAmelCase_ ) for k in cols ): raise ValueError('Issue in ID' , example['id'] ) return answer def A__ ( UpperCAmelCase_ , UpperCAmelCase_=False ): _UpperCamelCase : List[str] = _get_single_answer(UpperCAmelCase_ ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element _UpperCamelCase : Optional[Any] = example['document']['tokens'] _UpperCamelCase : List[str] = [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) return { "context": " ".join(UpperCAmelCase_ ), "answer": { "start_token": -1_0_0, # ignore index in cross-entropy "end_token": -1_0_0, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples _UpperCamelCase : Optional[int] = ['start_token', 'end_token'] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 _UpperCamelCase : List[Any] = example['document']['tokens'] _UpperCamelCase : Union[str, Any] = answer['start_token'] _UpperCamelCase : List[Any] = answer['end_token'] _UpperCamelCase : Dict = [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 _UpperCamelCase : Tuple = ' '.join(context[start_token:end_token] ) # checking above code if assertion: _UpperCamelCase : List[Any] = doc['is_html'][answer['start_token'] : answer['end_token']] _UpperCamelCase : List[Any] = doc['token'][answer['start_token'] : answer['end_token']] _UpperCamelCase : List[str] = ' '.join([old[i] for i in range(len(UpperCAmelCase_ ) ) if not is_html[i]] ) if new != old: print('ID:' , example['id'] ) print('New:' , UpperCAmelCase_ , end='\n' ) print('Old:' , UpperCAmelCase_ , end='\n\n' ) return { "context": " ".join(UpperCAmelCase_ ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=2_0_4_8 , UpperCAmelCase_=4_0_9_6 , UpperCAmelCase_=True ): # overlap will be of doc_stride - q_len _UpperCamelCase : Any = get_context_and_ans(UpperCAmelCase_ , assertion=UpperCAmelCase_ ) _UpperCamelCase : Optional[int] = out['answer'] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } _UpperCamelCase : Optional[Any] = tokenizer(example['question']['text'] , out['context'] ).input_ids _UpperCamelCase : Dict = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : str = [] _UpperCamelCase : Any = input_ids[:q_len] _UpperCamelCase : Optional[Any] = range(UpperCAmelCase_ , len(UpperCAmelCase_ ) , max_length - doc_stride ) for i in doc_start_indices: _UpperCamelCase : Optional[Any] = i + max_length - q_len _UpperCamelCase : Any = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['category'][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_0_0] * len(UpperCAmelCase_ ), "end_token": [-1_0_0] * len(UpperCAmelCase_ ), "category": category, }, } _UpperCamelCase : Any = out['context'].split() _UpperCamelCase : Optional[int] = splitted_context[answer['end_token']] _UpperCamelCase : Optional[Any] = len( tokenizer( ' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=UpperCAmelCase_ , ).input_ids ) _UpperCamelCase : Optional[int] = len( tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=UpperCAmelCase_ ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token _UpperCamelCase : Tuple = len(tokenizer(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 _UpperCamelCase : Union[str, Any] = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive _UpperCamelCase : List[Any] = answer['start_token'] _UpperCamelCase : Any = answer['end_token'] if assertion: _UpperCamelCase : Dict = tokenizer.decode(UpperCAmelCase_ ) if 
answer["span"] != new: print('ISSUE IN TOKENIZATION' ) print('OLD:' , answer['span'] ) print('NEW:' , UpperCAmelCase_ , end='\n\n' ) if len(UpperCAmelCase_ ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } _UpperCamelCase : Optional[int] = input_ids[:q_len] _UpperCamelCase : Dict = range(UpperCAmelCase_ , len(UpperCAmelCase_ ) , max_length - doc_stride ) _UpperCamelCase : Any = [] _UpperCamelCase : List[Any] = [] _UpperCamelCase : Any = [] _UpperCamelCase : int = [] # null, yes, no, long, short for i in doc_start_indices: _UpperCamelCase : str = i + max_length - q_len _UpperCamelCase : Tuple = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: _UpperCamelCase : int = start_token - i + q_len _UpperCamelCase : Any = end_token - i + q_len answers_category.append(answer['category'][0] ) # ["short"] -> "short" else: _UpperCamelCase : Union[str, Any] = -1_0_0 _UpperCamelCase : List[str] = -1_0_0 answers_category.append('null' ) _UpperCamelCase : Optional[Any] = inputs[-1][start_token : end_token + 1] answers_start_token.append(UpperCAmelCase_ ) answers_end_token.append(UpperCAmelCase_ ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('ISSUE in strided for ID:' , example['id'] ) print('New:' , tokenizer.decode(UpperCAmelCase_ ) ) print('Old:' , tokenizer.decode(UpperCAmelCase_ ) , end='\n\n' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=2_0_4_8 , UpperCAmelCase_=4_0_9_6 , UpperCAmelCase_=False ): _UpperCamelCase : Dict = get_strided_contexts_and_ans( UpperCAmelCase_ , UpperCAmelCase_ , doc_stride=UpperCAmelCase_ , max_length=UpperCAmelCase_ , assertion=UpperCAmelCase_ , ) return example def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ): with jsonlines.open(UpperCAmelCase_ , 'a' ) as writer: for example in tqdm(UpperCAmelCase_ , total=len(UpperCAmelCase_ ) , desc='Saving samples ... 
' ): _UpperCamelCase : Union[str, Any] = example['labels'] for ids, start, end, cat in zip( example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { 'input_ids': ids, 'start_token': start, 'end_token': end, 'category': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer snake_case_ : List[Any] = load_dataset('natural_questions') snake_case_ : List[Any] = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') snake_case_ : str = data['train' if PROCESS_TRAIN == 'true' else 'validation'] snake_case_ : str = { 'tokenizer': tokenizer, 'doc_stride': DOC_STRIDE, 'max_length': MAX_LENGTH, 'assertion': False, } snake_case_ : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs) snake_case_ : List[str] = data.remove_columns(['annotations', 'document', 'id', 'question']) print(data) np.random.seed(SEED) snake_case_ : Any = 'nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl' save_to_disk(data, file_name=cache_file_name)
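# --- Editorial note (not part of the original script) --------------------------
# The strided chunking above walks `doc_start_indices = range(q_len, len(input_ids),
# max_length - doc_stride)` and takes `input_ids[i : i + max_length - q_len]` at each
# start, so consecutive windows overlap by `doc_stride - q_len` tokens (matching the
# "overlap will be of doc_stride - q_len" comment). A minimal, self-contained sketch
# of that arithmetic; `_window_starts` is a hypothetical helper, the default values
# mirror the script's MAX_LENGTH=4096 and DOC_STRIDE=2048 constants:


def _window_starts(total_len: int, q_len: int, max_length: int = 4096, doc_stride: int = 2048) -> list:
    """Start indices the script above would iterate over for one example."""
    return list(range(q_len, total_len, max_length - doc_stride))


# _window_starts(5000, 20) -> [20, 2068, 4116]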
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int: __magic_name__ : str = parent __magic_name__ : List[Any] = 13 __magic_name__ : Union[str, Any] = 7 __magic_name__ : Tuple = True __magic_name__ : Dict = True __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = True __magic_name__ : int = 99 __magic_name__ : List[str] = 384 __magic_name__ : Optional[int] = 2 __magic_name__ : List[Any] = 4 __magic_name__ : int = 37 __magic_name__ : Union[str, Any] = 'gelu' __magic_name__ : Optional[int] = 0.1 __magic_name__ : str = 0.1 __magic_name__ : Optional[Any] = 512 __magic_name__ : Any = 16 __magic_name__ : Union[str, Any] = 2 __magic_name__ : Any = 0.02 __magic_name__ : List[str] = 3 __magic_name__ : Tuple = 4 __magic_name__ : List[Any] = 128 __magic_name__ : Optional[Any] = 2 __magic_name__ : List[str] = 9 __magic_name__ : str = 1 __magic_name__ : List[str] = None def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : List[str] = None if self.use_token_type_ids: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None __magic_name__ : int = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A 
: Tuple , _A : int , _A : Union[str, Any] ) -> Any: __magic_name__ : Dict = TFConvBertModel(config=_A ) __magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __magic_name__ : Any = [input_ids, input_mask] __magic_name__ : Tuple = model(_A ) __magic_name__ : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]: __magic_name__ : Dict = TFConvBertForMaskedLM(config=_A ) __magic_name__ : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple: __magic_name__ : Any = self.num_labels __magic_name__ : str = TFConvBertForSequenceClassification(config=_A ) __magic_name__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.num_choices __magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) __magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) __magic_name__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int: __magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A ) __magic_name__ : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = config_and_inputs __magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : List[str] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : Tuple = False A_ : Any = False A_ : List[Any] = False def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Optional[Any] = TFConvBertModelTester(self ) __magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : str ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> Any: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True __magic_name__ : Any = True if hasattr(_A , 'use_cache' ): __magic_name__ : List[Any] = True __magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A ) for model_class in self.all_model_classes: __magic_name__ : List[str] = self._prepare_for_class(_A , _A ) __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Tuple = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) __magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' ) __magic_name__ : Optional[int] = tf.keras.models.load_model(_A ) __magic_name__ : Optional[Any] = model(_A ) if self.is_encoder_decoder: __magic_name__ : Optional[int] = outputs['encoder_hidden_states'] __magic_name__ : Tuple = outputs['encoder_attentions'] 
else: __magic_name__ : Union[str, Any] = outputs['hidden_states'] __magic_name__ : Optional[Any] = outputs['attentions'] self.assertEqual(len(_A ) , _A ) __magic_name__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : str = True __magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A ) __magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A ) def check_decoder_attentions_output(_A : List[Any] ): __magic_name__ : Tuple = len(_A ) self.assertEqual(out_len % 2 , 0 ) __magic_name__ : Any = outputs.decoder_attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_A : int ): __magic_name__ : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = False __magic_name__ : List[str] = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) __magic_name__ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: __magic_name__ : Any = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Optional[int] = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : str = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : str = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) ) self.assertEqual(model.config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) @require_tf class _lowerCamelCase ( 
unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Tuple = model(_A )[0] __magic_name__ : str = [1, 6, 768] self.assertEqual(output.shape , _A ) __magic_name__ : Tuple = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
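# --- Editorial note (not part of the original tests) ---------------------------
# The attention-shape checks above expect `num_attention_heads / 2` heads because
# ConvBERT's mixed attention hands half of the heads (controlled by the config's
# `head_ratio`, 2 by default) to its span-based dynamic convolution branch.
# Minimal sketch of the effective self-attention head count under the defaults:

from transformers import ConvBertConfig

_convbert_cfg = ConvBertConfig()
print(_convbert_cfg.num_attention_heads // _convbert_cfg.head_ratio)  # 6 with the defaults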
"""simple docstring""" import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel __UpperCAmelCase = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = TOKEN HfFolder.save_token(__A ) @classmethod def __lowerCAmelCase ( cls ) -> Optional[Any]: try: delete_repo(token=cls._token , repo_id="""test-model-flax""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" ) except HTTPError: pass def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase_ :Optional[Any] = FlaxBertModel(__A ) model.push_to_hub("""test-model-flax""" , use_auth_token=self._token ) lowerCAmelCase_ :Union[str, Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase_ :str = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase_ :Union[str, Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase_ :str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__A , 1E-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="""test-model-flax""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__A , repo_id="""test-model-flax""" , push_to_hub=__A , use_auth_token=self._token ) lowerCAmelCase_ :Any = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase_ :Dict = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase_ :Tuple = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase_ :List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__A , 1E-3 , msg=f"""{key} not identical""" ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase_ :Optional[int] = FlaxBertModel(__A ) model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token ) lowerCAmelCase_ :Any = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) lowerCAmelCase_ :Tuple = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase_ :str = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase_ :Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__A , 1E-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __A , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=__A , use_auth_token=self._token ) lowerCAmelCase_ :Dict = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) lowerCAmelCase_ :str = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase_ :int = 
flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase_ :str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__A , 1E-3 , msg=f"""{key} not identical""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = True lowerCAmelCase_ :Any = flatten_dict(modela.params ) lowerCAmelCase_ :Dict = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowerCAmelCase_ :str = False return models_are_equal @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Optional[int] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowerCAmelCase_ :Union[str, Any] = FlaxBertModel(__A ) lowerCAmelCase_ :Union[str, Any] = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__A , __A ) ) with self.assertRaises(__A ): lowerCAmelCase_ :List[Any] = FlaxBertModel.from_pretrained(__A ) lowerCAmelCase_ :List[Any] = FlaxBertModel.from_pretrained(__A , subfolder=__A ) self.assertTrue(check_models_equal(__A , __A ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowerCAmelCase_ :List[Any] = FlaxBertModel(__A ) lowerCAmelCase_ :str = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__A , __A ) , max_shard_size="""10KB""" ) with self.assertRaises(__A ): lowerCAmelCase_ :List[str] = FlaxBertModel.from_pretrained(__A ) lowerCAmelCase_ :int = FlaxBertModel.from_pretrained(__A , subfolder=__A ) self.assertTrue(check_models_equal(__A , __A ) ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :int = """bert""" lowerCAmelCase_ :Any = """hf-internal-testing/tiny-random-bert-subfolder""" with self.assertRaises(__A ): lowerCAmelCase_ :Tuple = FlaxBertModel.from_pretrained(__A ) lowerCAmelCase_ :Any = FlaxBertModel.from_pretrained(__A , subfolder=__A ) self.assertIsNotNone(__A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[Any] = """bert""" lowerCAmelCase_ :str = """hf-internal-testing/tiny-random-bert-sharded-subfolder""" with self.assertRaises(__A ): lowerCAmelCase_ :Dict = FlaxBertModel.from_pretrained(__A ) lowerCAmelCase_ :Optional[int] = FlaxBertModel.from_pretrained(__A , subfolder=__A ) self.assertIsNotNone(__A )
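# --- Editorial note (not part of the original tests) ---------------------------
# The parameter comparison repeated throughout the hub tests above, factored into
# a small helper. One deliberate change, flagged here: the tests sum *raw*
# per-leaf differences, while this sketch sums absolute differences, which cannot
# cancel out. `params_close` is a hypothetical name.

from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict


def params_close(model_a, model_b, tol=1e-3):
    flat_a = flatten_dict(unfreeze(model_a.params))
    flat_b = flatten_dict(unfreeze(model_b.params))
    return all(abs(flat_a[k] - flat_b[k]).sum().item() <= tol for k in flat_a)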
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
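# --- Editorial note (not part of the original tests) ---------------------------
# The core `datasets` FAISS round trip exercised above, reduced to a compact,
# self-contained sketch (builds a toy in-memory index; `_toy` and the column
# names are illustrative, the API calls are the ones used by the tests):

import faiss
import numpy as np
from datasets import Dataset

_toy = Dataset.from_dict({"filename": [f"doc_{i}" for i in range(30)]})
_toy.add_faiss_index_from_external_arrays(
    external_arrays=np.ones((30, 5), dtype=np.float32) * np.arange(30).reshape(-1, 1),
    index_name="vecs",
    metric_type=faiss.METRIC_INNER_PRODUCT,
)
_scores, _examples = _toy.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
# _examples["filename"][0] == "doc_29" (largest inner product wins)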
'''simple docstring''' import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() _SCREAMING_SNAKE_CASE : Tuple = logging.get_logger("transformers.models.speecht5") _SCREAMING_SNAKE_CASE : int = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } _SCREAMING_SNAKE_CASE : Tuple = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } _SCREAMING_SNAKE_CASE : Optional[Any] = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } _SCREAMING_SNAKE_CASE : List[Any] = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } _SCREAMING_SNAKE_CASE : Optional[int] = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } _SCREAMING_SNAKE_CASE : Union[str, Any] = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } _SCREAMING_SNAKE_CASE : int = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", 
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } _SCREAMING_SNAKE_CASE : Union[str, Any] = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } _SCREAMING_SNAKE_CASE : List[Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } _SCREAMING_SNAKE_CASE : List[Any] = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _SCREAMING_SNAKE_CASE : int = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _SCREAMING_SNAKE_CASE : Tuple = [] _SCREAMING_SNAKE_CASE : str = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] _SCREAMING_SNAKE_CASE : str = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] _SCREAMING_SNAKE_CASE : Tuple = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] _SCREAMING_SNAKE_CASE : List[Any] = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def UpperCamelCase_( snake_case : Any , snake_case : Any , snake_case : Union[str, Any] , snake_case : Any , snake_case : str ): '''simple docstring''' for attribute in key.split("." ): snake_case_ = getattr(snake_case , snake_case ) if weight_type is not None: snake_case_ = getattr(snake_case , snake_case ).shape else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "running_mean": snake_case_ = value elif weight_type == "running_var": snake_case_ = value elif weight_type == "num_batches_tracked": snake_case_ = value else: snake_case_ = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def UpperCamelCase_( snake_case : Tuple , snake_case : Optional[int] ): '''simple docstring''' for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: snake_case_ , snake_case_ = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def UpperCamelCase_( snake_case : int , snake_case : List[Any] , snake_case : str ): '''simple docstring''' snake_case_ = [] if task == "s2t": snake_case_ = hf_model.speechta.encoder.prenet.feature_encoder snake_case_ = MAPPING_S2T snake_case_ = IGNORE_KEYS_S2T elif task == "t2s": snake_case_ = None snake_case_ = MAPPING_T2S snake_case_ = IGNORE_KEYS_T2S elif task == "s2s": snake_case_ = hf_model.speechta.encoder.prenet.feature_encoder snake_case_ = MAPPING_S2S snake_case_ = IGNORE_KEYS_S2S else: raise ValueError(f'Unsupported task: {task}' ) for name, value in fairseq_dict.items(): if should_ignore(snake_case , snake_case ): logger.info(f'{name} was ignored' ) continue snake_case_ = False if "conv_layers" in name: load_conv_layer( snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == "group" , ) snake_case_ = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: snake_case_ , snake_case_ = key.split(".*." ) if prefix in name and suffix in name: snake_case_ = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(snake_case )[0].split("." )[-2] snake_case_ = mapped_key.replace("*" , snake_case ) if "weight_g" in name: snake_case_ = "weight_g" elif "weight_v" in name: snake_case_ = "weight_v" elif "bias" in name: snake_case_ = "bias" elif "weight" in name: snake_case_ = "weight" elif "running_mean" in name: snake_case_ = "running_mean" elif "running_var" in name: snake_case_ = "running_var" elif "num_batches_tracked" in name: snake_case_ = "num_batches_tracked" else: snake_case_ = None set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case ) continue if not is_used: unused_weights.append(snake_case ) logger.warning(f'Unused weights: {unused_weights}' ) def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : Tuple , snake_case : List[str] , snake_case : Dict , snake_case : Dict ): '''simple docstring''' snake_case_ = full_name.split("conv_layers." )[-1] snake_case_ = name.split("." ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' 
) snake_case_ = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(snake_case ) @torch.no_grad() def UpperCamelCase_( snake_case : Dict , snake_case : int , snake_case : Dict , snake_case : Union[str, Any]=None , snake_case : Dict=None , snake_case : List[Any]=None , ): '''simple docstring''' if config_path is not None: snake_case_ = SpeechTaConfig.from_pretrained(snake_case ) else: snake_case_ = SpeechTaConfig() if task == "s2t": snake_case_ = config.max_text_positions snake_case_ = SpeechTaForSpeechToText(snake_case ) elif task == "t2s": snake_case_ = 1_8_7_6 snake_case_ = 6_0_0 snake_case_ = config.max_speech_positions snake_case_ = SpeechTaForTextToSpeech(snake_case ) elif task == "s2s": snake_case_ = 1_8_7_6 snake_case_ = config.max_speech_positions snake_case_ = SpeechTaForSpeechToSpeech(snake_case ) else: raise ValueError(f'Unknown task name: {task}' ) if vocab_path: snake_case_ = SpeechTaTokenizer(snake_case , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it snake_case_ = AddedToken("<mask>" , lstrip=snake_case , rstrip=snake_case ) snake_case_ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) snake_case_ = SpeechTaFeatureExtractor() snake_case_ = SpeechTaProcessor(tokenizer=snake_case , feature_extractor=snake_case ) processor.save_pretrained(snake_case ) snake_case_ = torch.load(snake_case ) recursively_load_weights(fairseq_checkpoint["model"] , snake_case , snake_case ) model.save_pretrained(snake_case ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(snake_case ) model.push_to_hub(snake_case ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. 
Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) _SCREAMING_SNAKE_CASE : Any = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
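# --- Editorial note (not part of the original script) --------------------------
# Example invocation (hypothetical local paths and script filename; the flags are
# exactly the ones declared by the argparse block above):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf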
"""Lightning callbacks and callback factories used by the seq2seq/RAG finetuning scripts."""

import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by monitoring a validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}; you can add your own to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results to text files
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
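# --- Editorial note (not part of the original module) --------------------------
# Sketch of wiring these callbacks into a Lightning Trainer (hypothetical output
# directory, metric, and patience value; `_build_trainer` is illustrative):

def _build_trainer(output_dir="outputs", metric="rouge2"):
    return pl.Trainer(
        callbacks=[
            Seq2SeqLoggingCallback(),
            get_checkpoint_callback(output_dir, metric),
            get_early_stopping_callback(metric, patience=3),
        ]
    )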
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[str] = 
TFAutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , 
output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
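# All of the tests above exercise the same mechanism: loading PyTorch weights
# into a TF model (`from_pt=True`) and the reverse (`from_tf=True`). A minimal
# round-trip sketch outside the test harness; the checkpoint name is only an
# example and is downloaded on first use.
from transformers import AutoModel, TFAutoModel

pt_model = AutoModel.from_pretrained("bert-base-uncased")            # native PyTorch weights
pt_model.save_pretrained("./bert-pt")                                # writes pytorch_model.bin
tf_model = TFAutoModel.from_pretrained("./bert-pt", from_pt=True)    # PT -> TF conversion
tf_model.save_pretrained("./bert-tf")                                # writes tf_model.h5
pt_roundtrip = AutoModel.from_pretrained("./bert-tf", from_tf=True)  # TF -> PT conversion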
"""Count the ways to make a total from UK coins (Project Euler 31).

Each helper counts the combinations that use coins up to its own
denomination, so two_pound(200) counts every way of making 2 pounds.
"""


def one_pence() -> int:
    # Only 1p coins: exactly one way to make any non-negative amount.
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
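# An equivalent bottom-up formulation (a sketch, not part of the original
# solution): ways[v] counts the combinations summing to v using the coins
# considered so far. Iterating coins in the outer loop counts unordered
# combinations rather than ordered sequences.
def solution_dp(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target
    for coin in coins:
        for value in range(coin, target + 1):
            ways[value] += ways[value - coin]
    return ways[target]


assert solution_dp(200) == 73682  # the known Project Euler 31 answer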
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetch a list of articles in JSON format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Dict = ["""flax""", """transformers"""] def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[int] = ["""flax""", """transformers"""] def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] )
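# The classes above are import-time placeholders: when the flax/transformers
# backends are missing, the library exposes these dummies so that imports
# still succeed but any use fails loudly. A simplified sketch of the pattern
# (the real DummyObject/requires_backends live in the library's utils module;
# this version raises unconditionally instead of checking availability):
class DummyObject(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the flax and transformers backends.")


def requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class FlaxPipelinePlaceholder(metaclass=DummyObject):  # hypothetical example name
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])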
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Any=[1, 2, 1] , UpperCamelCase__ : int=[2, 2, 4] , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=2.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCamelCase__ : Tuple=[1, 2, 3] , ) -> Dict: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = image_size __magic_name__ = patch_size __magic_name__ = num_channels __magic_name__ = embed_dim __magic_name__ = depths __magic_name__ = num_heads __magic_name__ = window_size __magic_name__ = mlp_ratio __magic_name__ = qkv_bias __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = drop_path_rate __magic_name__ = hidden_act __magic_name__ = use_absolute_embeddings __magic_name__ = patch_norm __magic_name__ = layer_norm_eps __magic_name__ = initializer_range __magic_name__ = is_training __magic_name__ = scope __magic_name__ = use_labels __magic_name__ = type_sequence_label_size __magic_name__ = encoder_stride __magic_name__ = out_features __magic_name__ = out_indices def _lowercase ( self : str ) -> Optional[int]: """simple docstring""" __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = self.get_config() return config, pixel_values, labels def _lowercase ( self : Tuple ) -> str: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range 
, encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[str]: """simple docstring""" __magic_name__ = MaskFormerSwinModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ ) __magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(UpperCamelCase__ ): __magic_name__ = ["""stem"""] __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ ) def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs __magic_name__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False a__ = False def _lowercase ( self : Any ) -> List[str]: """simple docstring""" __magic_name__ = MaskFormerSwinModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def _lowercase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass def _lowercase ( self : str ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self : Optional[int] ) -> List[str]: """simple docstring""" return def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) @unittest.skip("""Swin does not use 
inputs_embeds""" ) def _lowercase ( self : Any ) -> int: """simple docstring""" pass @unittest.skip("""Swin does not support feedforward chunking""" ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" pass def _lowercase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __magic_name__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def _lowercase ( self : Tuple ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) __magic_name__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def _lowercase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def _lowercase ( self : List[str] ) -> Dict: """simple docstring""" pass def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any: """simple docstring""" __magic_name__ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) __magic_name__ = outputs.hidden_states __magic_name__ = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # Swin has a different seq_length __magic_name__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _lowercase ( self : Dict ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = 3 __magic_name__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , 
collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __magic_name__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def _lowercase ( self : Optional[int] ) -> int: """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def _lowercase ( self : List[str] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def _lowercase ( self : Dict ) -> Optional[Any]: """simple docstring""" pass def _lowercase ( self : Dict ) -> Any: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ): __magic_name__ = 0 return t def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ): with torch.no_grad(): __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple() def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ): if isinstance(UpperCamelCase__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. 
Dict has''' F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.''' ) , ) recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , _A ): '''simple docstring''' a__ = (MaskFormerSwinBackbone,) if is_torch_available() else () a__ = MaskFormerSwinConfig def _lowercase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __magic_name__ = MaskFormerSwinModelTester(self ) def _lowercase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: __magic_name__ = backbone_class(UpperCamelCase__ ) backbone.to(UpperCamelCase__ ) backbone.eval() __magic_name__ = backbone(**UpperCamelCase__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ ) self.assertIsNotNone(outputs.attentions )
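# A small standalone sketch of the backbone contract these tests verify: one
# feature map per requested stage, with channel counts exposed on the model.
# The config values here are chosen tiny for illustration and are not taken
# from the test file.
import torch
from transformers import MaskFormerSwinConfig, MaskFormerSwinBackbone

config = MaskFormerSwinConfig(
    image_size=32, patch_size=2, embed_dim=16,
    depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=4,
    out_features=["stage1", "stage2", "stage3"],
)
backbone = MaskFormerSwinBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 32, 32))
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
    print(tuple(feature_map.shape), n_channels)  # (batch, channels, h, w) per stage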
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
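# A short usage sketch of the pipeline implemented above. The checkpoint is an
# example GIT captioning model; any image-to-text checkpoint works, and passing
# a `prompt` exercises the conditional-generation branch in `preprocess`.
from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
result = captioner("cat.png")  # any local image path, URL, or PIL.Image
print(result)  # e.g. [{'generated_text': 'a cat sitting on a sofa'}]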
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = '''▁''' __lowerCAmelCase = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __lowerCAmelCase = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } __lowerCAmelCase = { '''facebook/m2m100_418M''': 1_024, } # fmt: off __lowerCAmelCase = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask'] lowerCAmelCase : List[int] = [] lowerCAmelCase : List[int] = [] def __init__( self : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : Union[str, Any]="<s>" ,_UpperCAmelCase : Union[str, Any]="</s>" ,_UpperCAmelCase : int="</s>" ,_UpperCAmelCase : Tuple="<pad>" ,_UpperCAmelCase : str="<unk>" ,_UpperCAmelCase : Tuple="m2m100" ,_UpperCAmelCase : Optional[Dict[str, Any]] = None ,_UpperCAmelCase : List[Any]=8 ,**_UpperCAmelCase : Union[str, Any] ,): _a : int = {} if sp_model_kwargs is None else sp_model_kwargs _a : int = language_codes _a : List[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes] _a : Union[str, Any] = {lang_code: F"""__{lang_code}__""" for lang_code in 
fairseq_language_code} _a : List[str] = kwargs.get('additional_special_tokens' ,[] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(_UpperCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(_UpperCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_UpperCAmelCase ,tgt_lang=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,language_codes=_UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=_UpperCAmelCase ,**_UpperCAmelCase ,) _a : Any = vocab_file _a : int = load_json(_UpperCAmelCase ) _a : Dict = {v: k for k, v in self.encoder.items()} _a : int = spm_file _a : int = load_spm(_UpperCAmelCase ,self.sp_model_kwargs ) _a : Any = len(self.encoder ) _a : str = { self.get_lang_token(_UpperCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(_UpperCAmelCase ) } _a : Optional[int] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_UpperCAmelCase )} _a : Dict = {v: k for k, v in self.lang_token_to_id.items()} _a : Union[str, Any] = src_lang if src_lang is not None else 'en' _a : Optional[int] = tgt_lang _a : Optional[Any] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) _a : List[Any] = num_madeup_words @property def __lowercase ( self : int ): return len(self.encoder ) + len(self.lang_token_to_id ) @property def __lowercase ( self : Optional[int] ): return self._src_lang @src_lang.setter def __lowercase ( self : str ,_UpperCAmelCase : str ): _a : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : str ): return self.sp_model.encode(_UpperCAmelCase ,out_type=_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ): if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(_UpperCAmelCase ,self.encoder[self.unk_token] ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ): if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(_UpperCAmelCase ,self.unk_token ) def __lowercase ( self : int ,_UpperCAmelCase : Dict ): _a : Dict = [] _a : List[str] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_UpperCAmelCase ) + token _a : Optional[Any] = [] else: current_sub_tokens.append(_UpperCAmelCase ) out_string += self.sp_model.decode(_UpperCAmelCase ) return out_string.strip() def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ,_UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase ) _a : List[str] = [1] * len(self.prefix_tokens ) _a : Any = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def __lowercase ( self : Optional[int] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return 
self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __lowercase ( self : List[Any] ): _a : Dict = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): _a : Dict = self.__dict__.copy() _a : Any = None return state def __setstate__( self : Union[str, Any] ,_UpperCAmelCase : Dict ): _a : List[Any] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): _a : Union[str, Any] = {} _a : Any = load_spm(self.spm_file ,self.sp_model_kwargs ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ): _a : List[str] = Path(_UpperCAmelCase ) if not save_dir.is_dir(): raise OSError(F"""{save_directory} should be a directory""" ) _a : int = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) _a : Tuple = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder ,_UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file ,_UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(_UpperCAmelCase ,'wb' ) as fi: _a : Tuple = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (str(_UpperCAmelCase ), str(_UpperCAmelCase )) def __lowercase ( self : List[str] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str = "en" ,_UpperCAmelCase : Optional[List[str]] = None ,_UpperCAmelCase : str = "ro" ,**_UpperCAmelCase : List[Any] ,): _a : int = src_lang _a : List[str] = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase ) def __lowercase ( self : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] ,**_UpperCAmelCase : List[Any] ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _a : List[str] = src_lang _a : Optional[int] = self(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,**_UpperCAmelCase ) _a : List[str] = self.get_lang_id(_UpperCAmelCase ) _a : List[str] = tgt_lang_id return inputs def __lowercase ( self : List[str] ): self.set_src_lang_special_tokens(self.src_lang ) def __lowercase ( self : Any ): self.set_tgt_lang_special_tokens(self.tgt_lang ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : str ): _a : Tuple = self.get_lang_token(_UpperCAmelCase ) _a : Optional[Any] = self.lang_token_to_id[lang_token] _a : Union[str, Any] = [self.cur_lang_id] _a : Union[str, Any] = [self.eos_token_id] def __lowercase ( self : List[str] ,_UpperCAmelCase : str ): _a : int = self.get_lang_token(_UpperCAmelCase ) _a : Optional[Any] = self.lang_token_to_id[lang_token] _a : Dict = [self.cur_lang_id] _a : Optional[int] = [self.eos_token_id] def __lowercase ( self : str ,_UpperCAmelCase : str ): return self.lang_code_to_token[lang] def __lowercase ( self : List[Any] ,_UpperCAmelCase : str ): _a : Optional[int] = self.get_lang_token(_UpperCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> sentencepiece.SentencePieceProcessor: _a : int = sentencepiece.SentencePieceProcessor(**lowerCAmelCase_ ) spm.Load(str(lowerCAmelCase_ ) ) return spm def __lowerCamelCase ( lowerCAmelCase_ ) -> Union[Dict, 
List]: with open(lowerCAmelCase_ , 'r' ) as f: return json.load(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> None: with open(lowerCAmelCase_ , 'w' ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=2 )
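# Typical end-to-end use of this tokenizer (a sketch pairing it with the
# matching seq2seq model from the pretrained map above): set the source
# language on the tokenizer, then force the target language token at the
# start of generation.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

encoded = tokenizer("Hello, how are you?", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))  # e.g. ['Bonjour, ...']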
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]: __magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler'] __magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]: rank_zero_info('***** Validation results *****' ) __magic_name__ : str = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]: rank_zero_info('***** Test results *****' ) __magic_name__ : Optional[int] = trainer.callback_metrics # Log and save results to file __magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): """simple docstring""" parser.add_argument( '--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __magic_name__ : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: __magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase ) if logging_callback is None: __magic_name__ : Dict = LoggingCallback() __magic_name__ : List[str] = {} if args.fpaa: __magic_name__ : Dict = 16 if args.gpus > 1: __magic_name__ : Tuple = 'auto' __magic_name__ : int = 'ddp' __magic_name__ : str = args.accumulate_grad_batches __magic_name__ : str = None __magic_name__ : List[str] = 'auto' __magic_name__ : List[Any] = pl.Trainer.from_argparse_args( lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , ) if args.do_train: trainer.fit(lowerCAmelCase ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
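# How this module is intended to be used (a sketch): subclass the Lightning
# base class defined above ("BaseTransformer" in the upstream source, as the
# generic-train helper's type hint suggests) and supply the task-specific
# training step and dataloaders. The subclass name and dataset are hypothetical.
from torch.utils.data import DataLoader


class GlueTransformer(BaseTransformer):  # hypothetical task subclass
    def __init__(self, hparams):
        super().__init__(hparams, num_labels=2, mode="sequence-classification")

    def training_step(self, batch, batch_idx):
        outputs = self.model(**batch)
        return {"loss": outputs[0]}

    def get_dataloader(self, mode, batch_size, shuffle=False):
        # Plug in the task-specific Dataset here; the base class calls this
        # with mode in {"train", "dev", "test"}.
        raise NotImplementedError("supply a Dataset and return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)")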
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--original_config_file", type=str, required=True, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--image_size", default=5_12, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Any: """simple docstring""" if string == "True": return True elif string == "False": return False else: raise ValueError(F"""could not parse string as bool {string}""" ) parser.add_argument( "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool ) parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
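# Programmatic equivalent of the CLI above (a sketch; both paths are
# placeholders for a real ControlNet .pth checkpoint and its YAML config, and
# the remaining keyword arguments are left at their defaults).
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_controlnet_from_original_ckpt,
)

controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="control_sd15_canny.pth",  # placeholder checkpoint path
    original_config_file="cldm_v15.yaml",      # placeholder YAML config
    image_size=512,
    extract_ema=False,
)
controlnet.save_pretrained("./controlnet-canny", safe_serialization=True)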
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
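# A self-contained sketch of the denoising loop the tests above exercise: a
# DDPMScheduler stepped over its timesteps with a stand-in "model" (random
# noise), so it checks shapes and the step() API rather than sample quality.
if __name__ == "__main__":
    import torch
    from diffusers import DDPMScheduler

    sched = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    sched.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8)
    for t in sched.timesteps:
        noise_pred = torch.randn_like(sample)  # stand-in for model(sample, t)
        sample = sched.step(noise_pred, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])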
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : Dict , lowercase_ : Union[str, Any]=sys.maxsize): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = '''bilinear''' SCREAMING_SNAKE_CASE_ : Optional[int] = max_size SCREAMING_SNAKE_CASE_ : Any = short_edge_length def __call__( self : str , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for img in imgs: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize SCREAMING_SNAKE_CASE_ : Any = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img SCREAMING_SNAKE_CASE_ : Dict = size * 1.0 / min(lowercase_ , lowercase_) if h < w: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = size, scale * w else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = scale * h, size if max(lowercase_ , lowercase_) > self.max_size: SCREAMING_SNAKE_CASE_ : List[str] = self.max_size * 1.0 / max(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Any = newh * scale SCREAMING_SNAKE_CASE_ : str = neww * scale SCREAMING_SNAKE_CASE_ : int = int(neww + 0.5) SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(newh + 0.5) if img.dtype == np.uinta: SCREAMING_SNAKE_CASE_ : Tuple = Image.fromarray(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) SCREAMING_SNAKE_CASE_ : int = np.asarray(lowercase_) else: SCREAMING_SNAKE_CASE_ : int = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw SCREAMING_SNAKE_CASE_ : int = nn.functional.interpolate( lowercase_ , (newh, neww) , mode=self.interp_method , align_corners=lowercase_).squeeze(0) img_augs.append(lowercase_) return img_augs class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) SCREAMING_SNAKE_CASE_ : Any = cfg.INPUT.FORMAT SCREAMING_SNAKE_CASE_ : List[str] = cfg.SIZE_DIVISIBILITY SCREAMING_SNAKE_CASE_ : List[str] = cfg.PAD_VALUE SCREAMING_SNAKE_CASE_ : List[Any] = cfg.INPUT.MAX_SIZE_TEST SCREAMING_SNAKE_CASE_ : List[Any] = cfg.MODEL.DEVICE SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) SCREAMING_SNAKE_CASE_ : Dict = lambda lowercase_: (x - self.pixel_mean) / self.pixel_std def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = tuple(max(lowercase_) for s in zip(*[img.shape for img in images])) SCREAMING_SNAKE_CASE_ : List[str] = [im.shape[-2:] for im in images] SCREAMING_SNAKE_CASE_ : int = [ nn.functional.pad( lowercase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowercase_ , lowercase_) ] return torch.stack(lowercase_), torch.tensor(lowercase_) def __call__( self : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any]=False): '''simple docstring''' with torch.no_grad(): if not isinstance(lowercase_ , lowercase_): 
SCREAMING_SNAKE_CASE_ : Optional[Any] = [images] if single_image: assert len(lowercase_) == 1 for i in range(len(lowercase_)): if isinstance(images[i] , torch.Tensor): images.insert(lowercase_ , images.pop(lowercase_).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( lowercase_ , torch.as_tensor(img_tensorize(images.pop(lowercase_) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([im.shape[:2] for im in images]) SCREAMING_SNAKE_CASE_ : Optional[int] = self.aug(lowercase_) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.normalizer(lowercase_) for x in images] # now pad them to do the following operations SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.pad(lowercase_) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad SCREAMING_SNAKE_CASE_ : Optional[int] = torch.true_divide(lowercase_ , lowercase_) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _A (__a , __a ) -> Dict: """simple docstring""" boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _A (__a , __a ) -> Any: """simple docstring""" assert torch.isfinite(__a ).all(), "Box tensor contains infinite or NaN!" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = box_size tensor[:, 0].clamp_(min=0 , max=__a ) tensor[:, 1].clamp_(min=0 , max=__a ) tensor[:, 2].clamp_(min=0 , max=__a ) tensor[:, 3].clamp_(min=0 , max=__a )
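# A usage sketch for the resize helper defined at the top of this file (it is
# instantiated as ResizeShortestEdge([min, min], max) inside the preprocessing
# class above; the random uint8 image and sizes are my own toy values):
#
#     resize = ResizeShortestEdge([224, 224], max_size=448)
#     img = np.random.randint(0, 255, (300, 500, 3), dtype=np.uint8)
#     (out,) = resize([img])   # short edge -> 224, aspect ratio preserved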
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer UpperCamelCase__ = """bart""" UpperCamelCase__ = True @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE_ ) def _a ( ): if LOAD_DENSE_INDEX: __lowerCAmelCase = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" ) __lowerCAmelCase = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" ) __lowerCAmelCase = qar_model.eval() else: __lowerCAmelCase , __lowerCAmelCase = (None, None) if MODEL_TYPE == "bart": __lowerCAmelCase = AutoTokenizer.from_pretrained("yjernite/bart_eli5" ) __lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" ) __lowerCAmelCase = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" ) sas_model.load_state_dict(save_dict["model"] ) __lowerCAmelCase = sas_model.eval() else: __lowerCAmelCase , __lowerCAmelCase = make_qa_sas_model( model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE_ ) def _a ( ): if LOAD_DENSE_INDEX: __lowerCAmelCase = faiss.StandardGpuResources() __lowerCAmelCase = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"] __lowerCAmelCase = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 1_28) , ) __lowerCAmelCase = faiss.IndexFlatIP(1_28 ) __lowerCAmelCase = faiss.index_cpu_to_gpu(SCREAMING_SNAKE_CASE_ , 1 , SCREAMING_SNAKE_CASE_ ) wikiaab_gpu_index_flat.add(SCREAMING_SNAKE_CASE_ ) # TODO fix for larger GPU else: __lowerCAmelCase , __lowerCAmelCase = (None, None) __lowerCAmelCase = Elasticsearch([{"host": "localhost", "port": "9200"}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = datasets.load_dataset("eli5" , name="LFQA_reddit" ) __lowerCAmelCase = elia["train_eli5"] __lowerCAmelCase = np.memmap( "eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 1_28) ) __lowerCAmelCase = faiss.IndexFlatIP(1_28 ) eli5_train_q_index.add(SCREAMING_SNAKE_CASE_ ) return (elia_train, eli5_train_q_index) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = load_indexes() UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = load_models() UpperCamelCase__ , UpperCamelCase__ = load_train_data() def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int]=10 ): __lowerCAmelCase = embed_questions_for_retrieval([question] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase = eli5_train_q_index.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = [elia_train[int(SCREAMING_SNAKE_CASE_ )] for i in I[0]] return nn_examples def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int="wiki40b" , SCREAMING_SNAKE_CASE_ : List[Any]="dense" , SCREAMING_SNAKE_CASE_ : int=10 ): if source == "none": __lowerCAmelCase , __lowerCAmelCase = (" <P> ".join(["" for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index( 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: __lowerCAmelCase , __lowerCAmelCase = query_es_index( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index_name="english_wiki40b_snippets_100w" , n_results=SCREAMING_SNAKE_CASE_ , ) __lowerCAmelCase = [ (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst ] __lowerCAmelCase = "question: {} context: {}".format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda SCREAMING_SNAKE_CASE_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE_ : None), } ) def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any=64 , SCREAMING_SNAKE_CASE_ : Dict=2_56 , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.95 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.8 ): with torch.no_grad(): __lowerCAmelCase = qa_sas_generate( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_answers=1 , num_beams=SCREAMING_SNAKE_CASE_ , min_len=SCREAMING_SNAKE_CASE_ , max_len=SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , temp=SCREAMING_SNAKE_CASE_ , top_p=SCREAMING_SNAKE_CASE_ , top_k=SCREAMING_SNAKE_CASE_ , max_input_length=10_24 , device="cuda:0" , )[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar UpperCamelCase__ = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>""" UpperCamelCase__ = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class=\"img-container\"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia UpperCamelCase__ = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. 
""" st.sidebar.markdown(description, unsafe_allow_html=True) UpperCamelCase__ = [ """Answer the question""", """View the retrieved document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] UpperCamelCase__ = st.sidebar.checkbox("""Demo options""") if demo_options: UpperCamelCase__ = st.sidebar.selectbox( """""", action_list, index=3, ) UpperCamelCase__ = action_list.index(action_st) UpperCamelCase__ = st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) UpperCamelCase__ = show_type == """Show full text of passages""" else: UpperCamelCase__ = 3 UpperCamelCase__ = True UpperCamelCase__ = st.sidebar.checkbox("""Retrieval options""") if retrieval_options: UpperCamelCase__ = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) UpperCamelCase__ = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) UpperCamelCase__ = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: UpperCamelCase__ = """wiki40b""" UpperCamelCase__ = """dense""" UpperCamelCase__ = """beam""" UpperCamelCase__ = 2 UpperCamelCase__ = 64 UpperCamelCase__ = 256 UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = st.sidebar.checkbox("""Generation options""") if generate_options: UpperCamelCase__ = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. 
""" st.sidebar.markdown(generate_info) UpperCamelCase__ = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) UpperCamelCase__ = st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) UpperCamelCase__ = st.sidebar.slider( """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": UpperCamelCase__ = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: UpperCamelCase__ = st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) UpperCamelCase__ = st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) UpperCamelCase__ = None # start main text UpperCamelCase__ = [ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What's the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What's the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] UpperCamelCase__ = st.selectbox( """What would you like to ask? 
---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": UpperCamelCase__ = st.text_input("""Enter your question here:""", """""") else: UpperCamelCase__ = question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": UpperCamelCase__ , UpperCamelCase__ = make_support(question, source=wiki_source, method="""dense""", n_results=10) UpperCamelCase__ , UpperCamelCase__ = make_support(question, source=wiki_source, method="""sparse""", n_results=10) UpperCamelCase__ = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] UpperCamelCase__ = support_list[:10] UpperCamelCase__ = """<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: UpperCamelCase__ , UpperCamelCase__ = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: UpperCamelCase__ , UpperCamelCase__ = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): UpperCamelCase__ = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_""")) UpperCamelCase__ = res[1].strip() if sec_titles == "": UpperCamelCase__ = """[{}]({})""".format(res[0], wiki_url) else: UpperCamelCase__ = sec_titles.split(""" & """) UpperCamelCase__ = """ & """.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: UpperCamelCase__ = find_nearest_training(question) UpperCamelCase__ = nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) UpperCamelCase__ = [ """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) UpperCamelCase__ = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
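# The "dense" retriever above is max-inner-product search over embeddings; a
# tiny self-contained illustration of that primitive with faiss (toy 4-d
# vectors of my own, standing in for the 128-d passage embeddings built above):
if __name__ == "__main__":
    import faiss
    import numpy as np

    passages = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0.7, 0.7, 0, 0]], dtype="float32")
    index = faiss.IndexFlatIP(4)  # inner-product index, as in the index-building code above
    index.add(passages)
    query = np.array([[1.0, 0.2, 0, 0]], dtype="float32")
    scores, ids = index.search(query, 2)
    print(ids[0], scores[0])  # passage ids ranked by dot product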
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='''subcommands''', dest='''subcommand''')

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, '''func'''):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
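# A quick self-check for the island counter above (the grid is my own example,
# not part of the original file): with 8-way connectivity the two diagonal
# blobs below form exactly 2 islands.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 1, 1],
    ]
    g = Graph(4, 4, grid)
    print("island count:", g.count_islands())  # -> 2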
from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['image_processor'] SCREAMING_SNAKE_CASE__ = 'SamImageProcessor' def __init__( self , _lowerCamelCase ): super().__init__(_lowerCamelCase ) a :Dict = self.image_processor a :str = -10 a :List[str] = self.image_processor.size['''longest_edge'''] def __call__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = None , **_lowerCamelCase , ): a :List[Any] = self.image_processor( _lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) # pop arguments that are not used in the foward but used nevertheless a :Any = encoding_image_processor['''original_sizes'''] if hasattr(_lowerCamelCase , '''numpy''' ): # Checks if Torch or TF tensor a :Union[str, Any] = original_sizes.numpy() a , a , a :Optional[Any] = self._check_and_preprocess_points( input_points=_lowerCamelCase , input_labels=_lowerCamelCase , input_boxes=_lowerCamelCase , ) a :Optional[Any] = self._normalize_and_convert( _lowerCamelCase , _lowerCamelCase , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , input_boxes=_lowerCamelCase , return_tensors=_lowerCamelCase , ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="pt" , ): if input_points is not None: if len(_lowerCamelCase ) != len(_lowerCamelCase ): a :Tuple = [ self._normalize_coordinates(self.target_size , _lowerCamelCase , original_sizes[0] ) for point in input_points ] else: a :str = [ self._normalize_coordinates(self.target_size , _lowerCamelCase , _lowerCamelCase ) for point, original_size in zip(_lowerCamelCase , _lowerCamelCase ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: a , a :Tuple = self._pad_points_and_labels(_lowerCamelCase , _lowerCamelCase ) a :List[str] = np.array(_lowerCamelCase ) if input_labels is not None: a :Tuple = np.array(_lowerCamelCase ) if input_boxes is not None: if len(_lowerCamelCase ) != len(_lowerCamelCase ): a :Dict = [ self._normalize_coordinates(self.target_size , _lowerCamelCase , original_sizes[0] , is_bounding_box=_lowerCamelCase ) for box in input_boxes ] else: a :str = [ self._normalize_coordinates(self.target_size , _lowerCamelCase , _lowerCamelCase , is_bounding_box=_lowerCamelCase ) for box, original_size in zip(_lowerCamelCase , _lowerCamelCase ) ] a :Union[str, Any] = np.array(_lowerCamelCase ) if input_boxes is not None: if return_tensors == "pt": a :Optional[Any] = torch.from_numpy(_lowerCamelCase ) # boxes batch size of 1 by default a :Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": a :Dict = tf.convert_to_tensor(_lowerCamelCase ) # boxes batch size of 1 by default a :Tuple = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({'''input_boxes''': input_boxes} ) if input_points is not None: if return_tensors == "pt": a :List[Any] = torch.from_numpy(_lowerCamelCase ) # point batch 
size of 1 by default a :Any = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": a :Tuple = tf.convert_to_tensor(_lowerCamelCase ) # point batch size of 1 by default a :int = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({'''input_points''': input_points} ) if input_labels is not None: if return_tensors == "pt": a :Dict = torch.from_numpy(_lowerCamelCase ) # point batch size of 1 by default a :int = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": a :Union[str, Any] = tf.convert_to_tensor(_lowerCamelCase ) # point batch size of 1 by default a :Dict = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({'''input_labels''': input_labels} ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): a :List[Any] = max([point.shape[0] for point in input_points] ) a :Union[str, Any] = [] for i, point in enumerate(_lowerCamelCase ): if point.shape[0] != expected_nb_points: a :int = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) a :Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(_lowerCamelCase ) a :Any = processed_input_points return input_points, input_labels def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ): a , a :str = original_size a , a :Optional[Any] = self.image_processor._get_preprocess_shape(_lowerCamelCase , longest_edge=_lowerCamelCase ) a :List[Any] = deepcopy(_lowerCamelCase ).astype(_lowerCamelCase ) if is_bounding_box: a :Dict = coords.reshape(-1 , 2 , 2 ) a :Tuple = coords[..., 0] * (new_w / old_w) a :Dict = coords[..., 1] * (new_h / old_h) if is_bounding_box: a :Any = coords.reshape(-1 , 4 ) return coords def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ): if input_points is not None: if hasattr(_lowerCamelCase , '''numpy''' ): # Checks for TF or Torch tensor a :int = input_points.numpy().tolist() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(input_points[0] , _lowerCamelCase ): raise ValueError('''Input points must be a list of list of floating points.''' ) a :Dict = [np.array(_lowerCamelCase ) for input_point in input_points] else: a :List[str] = None if input_labels is not None: if hasattr(_lowerCamelCase , '''numpy''' ): a :Optional[int] = input_labels.numpy().tolist() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(input_labels[0] , _lowerCamelCase ): raise ValueError('''Input labels must be a list of list integers.''' ) a :List[Any] = [np.array(_lowerCamelCase ) for label in input_labels] else: a :Optional[Any] = None if input_boxes is not None: if hasattr(_lowerCamelCase , '''numpy''' ): a :int = input_boxes.numpy().tolist() if ( not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(input_boxes[0] , _lowerCamelCase ) or not isinstance(input_boxes[0][0] , _lowerCamelCase ) ): raise ValueError('''Input boxes must be a list of list of list of floating points.''' ) a :Optional[int] = [np.array(_lowerCamelCase ).astype(np.floataa ) for box in input_boxes] else: a :Optional[int] = None return input_points, input_labels, input_boxes @property def SCREAMING_SNAKE_CASE__ ( self ): a 
:List[str] = self.image_processor.model_input_names return list(dict.fromkeys(_lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.image_processor.post_process_masks(*_lowerCamelCase , **_lowerCamelCase )
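# The coordinate rescaling inside _normalize_coordinates above reduces to
# scaling x by new_w/old_w and y by new_h/old_h; a tiny numpy check of that
# arithmetic (the sizes here are my own toy numbers, not SAM defaults):
if __name__ == "__main__":
    import numpy as np

    old_h, old_w, new_h, new_w = 600, 800, 768, 1024
    points = np.array([[400.0, 300.0]])  # (x, y)
    print(points * np.array([new_w / old_w, new_h / old_h]))  # [[512. 384.]]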
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost to travel on every day in `days`, given 1-, 7- and 30-day pass costs."""
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
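# A small worked example for mincost_tickets (values are my own, following the
# classic "minimum cost for tickets" setup: costs = [1-day, 7-day, 30-day]).
# A 7-day pass bought on day 1 covers days 1-7; days 8 and 20 each take a
# 1-day pass, so the total is 7 + 2 + 2 = 11.
if __name__ == "__main__":
    travel_days = [1, 4, 6, 7, 8, 20]
    pass_costs = [2, 7, 15]
    print(mincost_tickets(travel_days, pass_costs))  # -> 11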
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Find the area of the grid containing a number of rectangles as close as possible to target."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() lowercase__ = logging.get_logger(__name__) def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : List[Any] = RobertaPreLayerNormConfig.from_pretrained( lowercase__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict _lowerCamelCase : str = torch.load(hf_hub_download(repo_id=lowercase__ , filename='pytorch_model.bin' ) ) _lowerCamelCase : Dict = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): _lowerCamelCase : Any = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue _lowerCamelCase : Dict = tensor_value _lowerCamelCase : List[Any] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=lowercase__ , config=lowercase__ , state_dict=lowercase__ ) model.save_pretrained(lowercase__ ) # convert tokenizer _lowerCamelCase : Dict = AutoTokenizer.from_pretrained(lowercase__ ) tokenizer.save_pretrained(lowercase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowercase__ = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
331
0
'''simple docstring''' import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __snake_case = logging.getLogger() def a ( ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ :List[Any] = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCamelCase__ :List[Any] = parser.parse_args() return args.f class lowercase ( A__ ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(UpperCamelCase_ ) def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(UpperCamelCase_ , '''argv''' , UpperCamelCase_ ): UpperCamelCase__ :Tuple = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(UpperCamelCase_ , 0.666 ) @slow @require_torch_non_multi_gpu def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(UpperCamelCase_ ) UpperCamelCase__ :Union[str, Any] = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(UpperCamelCase_ ) UpperCamelCase__ :str = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(UpperCamelCase_ )
97
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = ["""pixel_values"""] def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384} __magic_name__ : Dict = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[Any] = do_resize __magic_name__ : str = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : int = resample __magic_name__ : List[str] = do_rescale __magic_name__ : List[Any] = rescale_factor __magic_name__ : str = do_normalize __magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) __magic_name__ : Dict = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : Dict = int(shortest_edge / crop_pct ) __magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Optional[Any] = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : str = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Dict = size if size is not None else self.size __magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. __magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
331
0
"""simple docstring""" from __future__ import annotations def a_ ( lowerCamelCase , lowerCamelCase ): if b == 0: return (1, 0) ((UpperCAmelCase__) , (UpperCAmelCase__)) = extended_euclid(lowerCamelCase , a % b ) UpperCAmelCase__ = a // b return (y, x - k * y) def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): ((UpperCAmelCase__) , (UpperCAmelCase__)) = extended_euclid(lowerCamelCase , lowerCamelCase ) UpperCAmelCase__ = na * na UpperCAmelCase__ = ra * x * na + ra * y * na return (n % m + m) % m def a_ ( lowerCamelCase , lowerCamelCase ): ((UpperCAmelCase__) , (UpperCAmelCase__)) = extended_euclid(lowerCamelCase , lowerCamelCase ) if b < 0: UpperCAmelCase__ = (b % n + n) % n return b def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ , UpperCAmelCase__ = invert_modulo(lowerCamelCase , lowerCamelCase ), invert_modulo(lowerCamelCase , lowerCamelCase ) UpperCAmelCase__ = na * na UpperCAmelCase__ = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='chinese_remainder_theorem', verbose=True) testmod(name='chinese_remainder_theorem2', verbose=True) testmod(name='invert_modulo', verbose=True) testmod(name='extended_euclid', verbose=True)
98
'''simple docstring''' from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function lowerCAmelCase :Tuple = 1.0_5_4_5_7_1_8_1_7E-3_4 # unit of ℏ : J * s lowerCAmelCase :Union[str, Any] = 3E8 # unit of c : m * s^-1 def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" if (force, area, distance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if force < 0: raise ValueError('Magnitude of force can not be negative' ) if distance < 0: raise ValueError('Distance can not be negative' ) if area < 0: raise ValueError('Area can not be negative' ) if force == 0: __magic_name__ : Any = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: __magic_name__ : Optional[int] = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: __magic_name__ : Union[str, Any] = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('One and only one argument must be 0' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
331
0
import argparse import collections import json import os import re import string import sys import numpy as np lowercase : Union[str, Any] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) lowercase : Union[str, Any] = None def A_ ( ) -> Dict: a__ : Dict = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' ) parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' ) parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' ) parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' ) parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' ) parser.add_argument( '--na-prob-thresh' , '-t' , type=A__ , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=A__ , help='Save precision-recall curves to directory.' ) parser.add_argument('--verbose' , '-v' , action='store_true' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def A_ ( A__ ) -> int: a__ : Any = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Optional[int] = bool(qa['answers']['text'] ) return qid_to_has_ans def A_ ( A__ ) -> List[Any]: def remove_articles(A__ ): return ARTICLES_REGEX.sub(' ' , A__ ) def white_space_fix(A__ ): return " ".join(text.split() ) def remove_punc(A__ ): a__ : Optional[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(A__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) ) def A_ ( A__ ) -> Union[str, Any]: if not s: return [] return normalize_answer(A__ ).split() def A_ ( A__ , A__ ) -> Optional[Any]: return int(normalize_answer(A__ ) == normalize_answer(A__ ) ) def A_ ( A__ , A__ ) -> Any: a__ : Tuple = get_tokens(A__ ) a__ : Optional[int] = get_tokens(A__ ) a__ : int = collections.Counter(A__ ) & collections.Counter(A__ ) a__ : Optional[Any] = sum(common.values() ) if len(A__ ) == 0 or len(A__ ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : int = 1.0 * num_same / len(A__ ) a__ : List[Any] = 1.0 * num_same / len(A__ ) a__ : Tuple = (2 * precision * recall) / (precision + recall) return fa def A_ ( A__ , A__ ) -> Any: a__ : Tuple = {} a__ : int = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Optional[int] = qa['id'] a__ : Any = [t for t in qa['answers']['text'] if normalize_answer(A__ )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : List[str] = [''] if qid not in preds: print(F'Missing prediction for {qid}' ) continue a__ : Union[str, Any] = preds[qid] # Take max over all gold answers a__ : Tuple = max(compute_exact(A__ , A__ ) for a in gold_answers ) a__ : List[Any] = max(compute_fa(A__ , A__ ) for a in gold_answers ) return exact_scores, fa_scores def A_ ( A__ , A__ , A__ , A__ ) -> Tuple: a__ : List[Any] = {} for qid, s in scores.items(): a__ : Tuple = na_probs[qid] > na_prob_thresh if pred_na: a__ : str = float(not qid_to_has_ans[qid] ) else: a__ : int = s return new_scores def A_ ( A__ , A__ , A__=None ) -> List[Any]: if not qid_list: a__ : str = len(A__ ) return collections.OrderedDict( [ ('exact', 1_00.0 * sum(exact_scores.values() ) / total), ('f1', 1_00.0 * sum(fa_scores.values() ) / total), ('total', total), ] ) else: a__ : int = len(A__ ) return collections.OrderedDict( [ ('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total), ('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total), ('total', total), ] ) def A_ ( A__ , A__ , A__ ) -> Optional[int]: for k in new_eval: a__ : Optional[int] = new_eval[k] def A_ ( A__ , A__ , A__ , A__ ) -> Union[str, Any]: plt.step(A__ , A__ , color='b' , alpha=0.2 , where='post' ) plt.fill_between(A__ , A__ , step='post' , alpha=0.2 , color='b' ) plt.xlabel('Recall' ) plt.ylabel('Precision' ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(A__ ) plt.savefig(A__ ) plt.clf() def A_ ( A__ , A__ , A__ , A__ , A__=None , A__=None ) -> Any: a__ : str = sorted(A__ , key=lambda A__ : na_probs[k] ) a__ : Tuple = 0.0 a__ : List[str] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Optional[int] = [0.0] a__ : Tuple = 0.0 for i, qid in enumerate(A__ ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Union[str, Any] = true_pos / float(i + 1 ) a__ : List[Any] = true_pos / float(A__ ) if i == len(A__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(A__ ) recalls.append(A__ ) if out_image: plot_pr_curve(A__ , A__ , A__ , A__ ) return {"ap": 1_00.0 * avg_prec} def A_ ( A__ , A__ , A__ , A__ , A__ , A__ ) -> str: if out_image_dir and not os.path.exists(A__ ): os.makedirs(A__ ) a__ : List[str] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , ) a__ : Optional[int] = make_precision_recall_eval( A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , ) a__ : int = {k: float(A__ ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , ) merge_eval(A__ , A__ , 'pr_exact' ) merge_eval(A__ , A__ , 'pr_f1' ) merge_eval(A__ , A__ , 'pr_oracle' ) def A_ ( A__ , A__ , A__ , A__ ) -> List[Any]: if not qid_list: return a__ : List[str] = [na_probs[k] for k in qid_list] a__ : Dict = np.ones_like(A__ ) / float(len(A__ ) ) plt.hist(A__ , weights=A__ , bins=20 , range=(0.0, 1.0) ) plt.xlabel('Model probability of no-answer' ) plt.ylabel('Proportion of dataset' ) plt.title(F'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(A__ , F'na_prob_hist_{name}.png' ) ) plt.clf() def A_ ( A__ , A__ , A__ , A__ ) -> Optional[int]: a__ : Any = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : List[str] = num_no_ans a__ : Tuple = cur_score a__ : Tuple = 0.0 a__ : str = sorted(A__ , key=lambda A__ : na_probs[k] ) for i, qid in enumerate(A__ ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : str = scores[qid] else: if preds[qid]: a__ : int = -1 else: a__ : Tuple = 0 cur_score += diff if cur_score > best_score: a__ : Dict = cur_score a__ : Tuple = na_probs[qid] return 1_00.0 * best_score / len(A__ ), best_thresh def A_ ( A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]: a__ , a__ : str = find_best_thresh(A__ , A__ , A__ , A__ ) a__ , a__ : Tuple = find_best_thresh(A__ , A__ , A__ , A__ ) a__ : Optional[Any] = best_exact a__ : Optional[Any] = exact_thresh a__ : Tuple = best_fa a__ : int = fa_thresh def A_ ( ) -> Union[str, Any]: with open(OPTS.data_file ) as f: a__ : Optional[int] = json.load(A__ ) a__ : Any = dataset_json['data'] with open(OPTS.pred_file ) as f: a__ : Optional[Any] = json.load(A__ ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : int = json.load(A__ ) else: a__ : Union[str, Any] = {k: 0.0 for k in preds} a__ : str = make_qid_to_has_ans(A__ ) # maps qid to True/False a__ : int = [k for k, v in qid_to_has_ans.items() if v] a__ : Tuple = [k for k, v in qid_to_has_ans.items() if not v] a__ , a__ : str = get_raw_scores(A__ , A__ ) a__ : str = apply_no_ans_threshold(A__ , A__ , A__ , OPTS.na_prob_thresh ) a__ : Union[str, Any] = apply_no_ans_threshold(A__ , A__ , A__ , OPTS.na_prob_thresh ) a__ : List[Any] = make_eval_dict(A__ , A__ ) if has_ans_qids: a__ : str = make_eval_dict(A__ , A__ , qid_list=A__ ) merge_eval(A__ , A__ , 'HasAns' ) if no_ans_qids: a__ : int = make_eval_dict(A__ , A__ , qid_list=A__ ) merge_eval(A__ , A__ , 'NoAns' ) if OPTS.na_prob_file: find_all_best_thresh(A__ , A__ , A__ , A__ , A__ , A__ ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(A__ , A__ , A__ , A__ , A__ , OPTS.out_image_dir ) histogram_na_prob(A__ , A__ , OPTS.out_image_dir , 'hasAns' ) histogram_na_prob(A__ , A__ , OPTS.out_image_dir , 'noAns' ) if OPTS.out_file: with open(OPTS.out_file , 'w' ) as f: json.dump(A__ , A__ ) else: print(json.dumps(A__ , indent=2 ) ) if __name__ == "__main__": lowercase : List[Any] = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
99
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
331
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Optional[int] = '''dpr''' def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__ = 0 , **lowerCAmelCase__ , ): super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = projection_dim __SCREAMING_SNAKE_CASE = position_embedding_type
100
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase :Union[str, Any] = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
331
0
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa lowercase__ :List[Any] = logging.getLogger(__name__) class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Any ='''summarization''' lowercase_ : Optional[Any] =['''loss'''] lowercase_ : Union[str, Any] =ROUGE_KEYS lowercase_ : Optional[int] ='''rouge2''' def __init__( self ,A__ ,**A__): if hparams.sortish_sampler and hparams.gpus > 1: lowercase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''') if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''') super().__init__(A__ ,num_labels=A__ ,mode=self.mode ,**A__) use_task_specific_params(self.model ,'''summarization''') save_git_info(self.hparams.output_dir) lowercase = Path(self.output_dir) / '''metrics.json''' lowercase = Path(self.output_dir) / '''hparams.pkl''' pickle_save(self.hparams ,self.hparams_save_path) lowercase = 0 lowercase = defaultdict(A__) lowercase = self.config.model_type lowercase = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size lowercase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } lowercase = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } lowercase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} lowercase = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}' assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}' if self.hparams.freeze_embeds: freeze_embeds(self.model) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder()) assert_all_frozen(self.model.get_encoder()) lowercase = get_git_info()['''repo_sha'''] lowercase = hparams.num_workers lowercase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,A__): lowercase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] lowercase = self.decoder_start_token_id lowercase = ( SeqaSeqDataset if hasattr(self.tokenizer ,'''prepare_seq2seq_batch''') else LegacySeqaSeqDataset ) lowercase = False lowercase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: lowercase = self.hparams.eval_max_gen_length else: lowercase = self.model.config.max_length lowercase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def A__ ( self ,A__): lowercase = { k: self.tokenizer.batch_decode(v.tolist()) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(A__ ,Path(self.output_dir) / '''text_batch.json''') save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir) / '''tok_batch.json''') lowercase = True return readable_batch def A__ ( self ,A__ ,**A__): return self.model(A__ ,**A__) def A__ ( self ,A__): lowercase = self.tokenizer.batch_decode( A__ ,skip_special_tokens=A__ ,clean_up_tokenization_spaces=A__) return lmap(str.strip ,A__) def A__ ( self ,A__): lowercase = self.tokenizer.pad_token_id lowercase , lowercase = batch['''input_ids'''], batch['''attention_mask'''] lowercase = batch['''labels'''] if isinstance(self.model ,A__): lowercase = self.model._shift_right(A__) else: lowercase = shift_tokens_right(A__ ,A__) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero lowercase = decoder_input_ids self.save_readable_batch(A__) lowercase = self(A__ ,attention_mask=A__ ,decoder_input_ids=A__ ,use_cache=A__) lowercase = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id lowercase = nn.CrossEntropyLoss(ignore_index=A__) assert lm_logits.shape[-1] == self.vocab_size lowercase = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1]) ,tgt_ids.view(-1)) else: lowercase = nn.functional.log_softmax(A__ ,dim=-1) lowercase , lowercase = label_smoothed_nll_loss( A__ ,A__ ,self.hparams.label_smoothing ,ignore_index=A__) return (loss,) @property def A__ ( self): return self.tokenizer.pad_token_id def A__ ( self ,A__ ,A__): lowercase = self._step(A__) lowercase = dict(zip(self.loss_names ,A__)) # tokens per batch lowercase = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum() lowercase = batch['''input_ids'''].shape[0] lowercase = batch['''input_ids'''].eq(self.pad).sum() lowercase = batch['''input_ids'''].eq(self.pad).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def A__ ( self ,A__ ,A__): return self._generative_step(A__) def A__ ( self ,A__ ,A__="val"): self.step_count += 1 lowercase = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} lowercase = losses['''loss'''] lowercase = { k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } lowercase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) lowercase = torch.tensor(A__).type_as(A__) generative_metrics.update({k: v.item() for k, v in losses.items()}) losses.update(A__) lowercase = {f'{prefix}_avg_{k}': x for k, x in losses.items()} lowercase = self.step_count self.metrics[prefix].append(A__) # callback writes this to self.metrics_save_path lowercase = flatten_list([x['''preds'''] for x in outputs]) return { "log": all_metrics, "preds": preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor, } def A__ ( self ,A__ ,A__): return calculate_rouge(A__ ,A__) def A__ ( self ,A__): lowercase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') lowercase = self.model.generate( batch['''input_ids'''] ,attention_mask=batch['''attention_mask'''] ,use_cache=A__ ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,) lowercase = (time.time() - ta) / batch['''input_ids'''].shape[0] lowercase = self.ids_to_clean_text(A__) lowercase = self.ids_to_clean_text(batch['''labels''']) lowercase = self._step(A__) lowercase = dict(zip(self.loss_names ,A__)) lowercase = self.calc_generative_metrics(A__ ,A__) lowercase = np.mean(lmap(A__ ,A__)) base_metrics.update(gen_time=A__ ,gen_len=A__ ,preds=A__ ,target=A__ ,**A__) return base_metrics def A__ ( self ,A__ ,A__): return self._generative_step(A__) def A__ ( self ,A__): return self.validation_epoch_end(A__ ,prefix='''test''') def A__ ( self ,A__): lowercase = self.n_obs[type_path] lowercase = self.target_lens[type_path] lowercase = self.dataset_class( self.tokenizer ,type_path=A__ ,n_obs=A__ ,max_target_length=A__ ,**self.dataset_kwargs ,) return dataset def A__ ( self ,A__ ,A__ ,A__ = False): lowercase = self.get_dataset(A__) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": lowercase = dataset.make_sortish_sampler(A__ ,distributed=self.hparams.gpus > 1) return DataLoader( A__ ,batch_size=A__ ,collate_fn=dataset.collate_fn ,shuffle=A__ ,num_workers=self.num_workers ,sampler=A__ ,) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": lowercase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1) return DataLoader( A__ ,batch_sampler=A__ ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,) else: return DataLoader( A__ ,batch_size=A__ ,collate_fn=dataset.collate_fn ,shuffle=A__ ,num_workers=self.num_workers ,sampler=A__ ,) def A__ ( self): lowercase = self.get_dataloader('''train''' ,batch_size=self.hparams.train_batch_size ,shuffle=A__) return dataloader def A__ ( self): return self.get_dataloader('''val''' ,batch_size=self.hparams.eval_batch_size) def A__ ( self): return self.get_dataloader('''test''' ,batch_size=self.hparams.eval_batch_size) @staticmethod def A__ ( A__ ,A__): BaseTransformer.add_model_specific_args(A__ ,A__) add_generic_args(A__ ,A__) parser.add_argument( '''--max_source_length''' ,default=1_0_2_4 ,type=A__ ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--max_target_length''' ,default=5_6 ,type=A__ ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--val_max_target_length''' ,default=1_4_2 ,type=A__ ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--test_max_target_length''' ,default=1_4_2 ,type=A__ ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument('''--freeze_encoder''' ,action='''store_true''') parser.add_argument('''--freeze_embeds''' ,action='''store_true''') parser.add_argument('''--sortish_sampler''' ,action='''store_true''' ,default=A__) parser.add_argument('''--overwrite_output_dir''' ,action='''store_true''' ,default=A__) parser.add_argument('''--max_tokens_per_batch''' ,type=A__ ,default=A__) parser.add_argument('''--logger_name''' ,type=A__ ,choices=['''default''', '''wandb''', '''wandb_shared'''] ,default='''default''') parser.add_argument('''--n_train''' ,type=A__ ,default=-1 ,required=A__ ,help='''# examples. -1 means use all.''') parser.add_argument('''--n_val''' ,type=A__ ,default=5_0_0 ,required=A__ ,help='''# examples. -1 means use all.''') parser.add_argument('''--n_test''' ,type=A__ ,default=-1 ,required=A__ ,help='''# examples. -1 means use all.''') parser.add_argument( '''--task''' ,type=A__ ,default='''summarization''' ,required=A__ ,help='''# examples. -1 means use all.''') parser.add_argument('''--label_smoothing''' ,type=A__ ,default=0.0 ,required=A__) parser.add_argument('''--src_lang''' ,type=A__ ,default='''''' ,required=A__) parser.add_argument('''--tgt_lang''' ,type=A__ ,default='''''' ,required=A__) parser.add_argument('''--eval_beams''' ,type=A__ ,default=A__ ,required=A__) parser.add_argument( '''--val_metric''' ,type=A__ ,default=A__ ,required=A__ ,choices=['''bleu''', '''rouge2''', '''loss''', None]) parser.add_argument('''--eval_max_gen_length''' ,type=A__ ,default=A__ ,help='''never generate more than n tokens''') parser.add_argument('''--save_top_k''' ,type=A__ ,default=1 ,required=A__ ,help='''How many checkpoints to save''') parser.add_argument( '''--early_stopping_patience''' ,type=A__ ,default=-1 ,required=A__ ,help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will affect it.''' ) ,) return parser class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Optional[int] ='''translation''' lowercase_ : int =['''loss'''] lowercase_ : Optional[Any] =['''bleu'''] lowercase_ : Tuple ='''bleu''' def __init__( self ,A__ ,**A__): super().__init__(A__ ,**A__) lowercase = hparams.src_lang lowercase = hparams.tgt_lang def A__ ( self ,A__ ,A__): return calculate_bleu(A__ ,A__) def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=lowerCAmelCase__ ) check_output_dir(lowerCAmelCase__ , expected_items=3 ) if model is None: if "summarization" in args.task: lowercase = SummarizationModule(lowerCAmelCase__ ) else: lowercase = TranslationModule(lowerCAmelCase__ ) lowercase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): lowercase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger lowercase = os.environ.get('''WANDB_PROJECT''' , lowerCAmelCase__ ) lowercase = WandbLogger(name=model.output_dir.name , project=lowerCAmelCase__ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger lowercase = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' ) if args.early_stopping_patience >= 0: lowercase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: lowercase = False lowercase = args.val_metric == '''loss''' lowercase = generic_train( lowerCAmelCase__ , lowerCAmelCase__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , lowerCAmelCase__ ) , early_stopping_callback=lowerCAmelCase__ , logger=lowerCAmelCase__ , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model lowercase = '''''' lowercase = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=lowerCAmelCase__ ) ) if checkpoints: lowercase = checkpoints[-1] lowercase = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": lowercase__ :int = argparse.ArgumentParser() lowercase__ :Union[str, Any] = pl.Trainer.add_argparse_args(parser) lowercase__ :Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd()) lowercase__ :Union[str, Any] = parser.parse_args() main(args)
101
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase :Any = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple: super().__init__(**_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]: __magic_name__ : str = {} if "candidate_labels" in kwargs: __magic_name__ : str = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: __magic_name__ : Tuple = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int: __magic_name__ : Dict = load_image(_A ) __magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) __magic_name__ : Optional[Any] = candidate_labels __magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels] __magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A ) __magic_name__ : Optional[Any] = [text_inputs] return inputs def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str: __magic_name__ : str = model_inputs.pop('candidate_labels' ) __magic_name__ : str = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , _A ): __magic_name__ : Dict = text_inputs[0] else: # Batching case. __magic_name__ : Optional[Any] = text_inputs[0][0] __magic_name__ : List[Any] = self.model(**_A , **_A ) __magic_name__ : str = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]: __magic_name__ : Tuple = model_outputs.pop('candidate_labels' ) __magic_name__ : Union[str, Any] = model_outputs['logits'][0] if self.framework == "pt": __magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 ) __magic_name__ : Tuple = probs.tolist() if not isinstance(_A , _A ): __magic_name__ : Any = [scores] elif self.framework == "tf": __magic_name__ : Any = stable_softmax(_A , axis=-1 ) __magic_name__ : Dict = probs.numpy().tolist() else: raise ValueError(F'Unsupported framework: {self.framework}' ) __magic_name__ : Union[str, Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] ) ] return result
331
0
"""simple docstring""" from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) SCREAMING_SNAKE_CASE : str = 2_9979_2458 # Symbols SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = symbols("""ct x y z""") def lowercase ( _snake_case : float ) ->float: """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def lowercase ( _snake_case : float ) ->float: """simple docstring""" return 1 / sqrt(1 - beta(_snake_case ) ** 2 ) def lowercase ( _snake_case : float ) ->np.ndarray: """simple docstring""" return np.array( [ [gamma(_snake_case ), -gamma(_snake_case ) * beta(_snake_case ), 0, 0], [-gamma(_snake_case ) * beta(_snake_case ), gamma(_snake_case ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def lowercase ( _snake_case : float , _snake_case : np.ndarray | None = None ) ->np.ndarray: """simple docstring""" if event is None: __snake_case : Optional[int] = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(_snake_case ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: SCREAMING_SNAKE_CASE : str = transform(2997_9245) print("""Example of four vector: """) print(F'ct\' = {four_vector[0]}') print(F'x\' = {four_vector[1]}') print(F'y\' = {four_vector[2]}') print(F'z\' = {four_vector[3]}') # Substitute symbols with numerical values SCREAMING_SNAKE_CASE : str = {ct: c, x: 1, y: 1, z: 1} SCREAMING_SNAKE_CASE : str = [four_vector[i].subs(sub_dict) for i in range(4)] print(F'\n{numerical_vector}')
102
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' __magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. </s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ :
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
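# A minimal usage sketch of the byte-level tokenizer exercised above (not part of
# the test suite): ByT5 needs no vocabulary file, so the tokenizer is constructed
# directly; each UTF-8 byte maps to byte_value + 3 (pad=0, eos=1, unk=2) and an
# EOS id of 1 is appended on encode.
from transformers import ByT5Tokenizer

byt5_tokenizer = ByT5Tokenizer()
ids = byt5_tokenizer("hi").input_ids    # [107, 108, 1]: ord("h")+3, ord("i")+3, </s>
roundtrip = byt5_tokenizer.decode(ids)  # "hi</s>"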
import numpy as np
import torch

from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # to uint8-range HWC numpy images, as expected by the encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        # back to CHW tensors in [-1, 1]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
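# Quick sanity sketch for the watermarker above (assumes a [batch, 3, H, W] image
# tensor in [-1, 1], as produced by a diffusion VAE decoder; a zero tensor stands
# in for real images here):
if __name__ == "__main__":
    watermarker = StableDiffusionXLWatermarker()
    images = torch.zeros(1, 3, 256, 256)
    watermarked = watermarker.apply_watermark(images)
    assert watermarked.shape == images.shape  # bits are hidden via DWT-DCT, shape unchanged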
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]] __magic_name__ : Dict = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def __lowerCAmelCase ( self : List[Any] ) -> Tuple: __magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]] __magic_name__ : List[Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 ) __magic_name__ : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 ) __magic_name__ : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 ) __magic_name__ : Any = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
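# In practice the constraint is consumed by constrained beam search. A hedged
# sketch (the model call is illustrative and commented out; `constraints=` is the
# transformers generate() argument this class is designed for):
from transformers.generation import DisjunctiveConstraint

constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
# outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)
# Every returned beam is then guaranteed to contain one of the two branches.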
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _A ( A__ ): """simple docstring""" __lowercase , __lowercase = image.size __lowercase , __lowercase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __lowercase = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) __lowercase = np.array(A__ ).astype(np.floataa ) / 2_5_5.0 __lowercase = image[None].transpose(0 , 3 , 1 , 2 ) __lowercase = torch.from_numpy(A__ ) return 2.0 * image - 1.0 class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : Union[str, Any] ,lowercase__ : VQModel ,lowercase__ : UNetaDModel ,lowercase__ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] ,): super().__init__() self.register_modules(vqvae=lowercase__ ,unet=lowercase__ ,scheduler=lowercase__ ) @torch.no_grad() def __call__( self : Dict ,lowercase__ : Union[torch.Tensor, PIL.Image.Image] = None ,lowercase__ : Optional[int] = 1 ,lowercase__ : Optional[int] = 1_0_0 ,lowercase__ : Optional[float] = 0.0 ,lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowercase__ : Optional[str] = "pil" ,lowercase__ : bool = True ,): if isinstance(lowercase__ ,PIL.Image.Image ): __lowercase = 1 elif isinstance(lowercase__ ,torch.Tensor ): __lowercase = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase__ )}" ) if isinstance(lowercase__ ,PIL.Image.Image ): __lowercase = preprocess(lowercase__ ) __lowercase , __lowercase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __lowercase = (batch_size, self.unet.config.in_channels // 2, height, width) __lowercase = next(self.unet.parameters() ).dtype __lowercase = randn_tensor(lowercase__ ,generator=lowercase__ ,device=self.device ,dtype=lowercase__ ) __lowercase = image.to(device=self.device ,dtype=lowercase__ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(lowercase__ ,device=self.device ) __lowercase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __lowercase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase = {} if accepts_eta: __lowercase = eta for t in self.progress_bar(lowercase__ ): # concat latents and low resolution image in the channel dimension. 
__lowercase = torch.cat([latents, image] ,dim=1 ) __lowercase = self.scheduler.scale_model_input(lowercase__ ,lowercase__ ) # predict the noise residual __lowercase = self.unet(lowercase__ ,lowercase__ ).sample # compute the previous noisy sample x_t -> x_t-1 __lowercase = self.scheduler.step(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample # decode the image latents with the VQVAE __lowercase = self.vqvae.decode(lowercase__ ).sample __lowercase = torch.clamp(lowercase__ ,-1.0 ,1.0 ) __lowercase = image / 2 + 0.5 __lowercase = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(lowercase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase__ )
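# Hedged usage sketch: the obfuscated class above corresponds to diffusers'
# LDMSuperResolutionPipeline, and the checkpoint name below is an assumption
# based on the CompVis 4x super-resolution release, not stated in this file.
import PIL.Image
from diffusers import LDMSuperResolutionPipeline

pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = PIL.Image.new("RGB", (128, 128))  # stand-in for a real low-resolution input
upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]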
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowerCAmelCase :List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) lowerCAmelCase :List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Union[str, Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Tuple = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) lowerCAmelCase :Tuple = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Union[str, Any] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) lowerCAmelCase :Dict = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' lowerCAmelCase :Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' lowerCAmelCase :Any = '''''' lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ): """simple docstring""" assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): __magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( 
lowerCAmelCase : Tuple ): """simple docstring""" ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : str = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): __magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Any = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
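# Minimal sketch of the API under test (names follow the fixture comment above;
# `example_yaml_structure` stands for the structure held by the obfuscated
# variable loaded at the top of this file):
# readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
# readme.validate()   # raises ValueError listing every problem found
# readme.to_dict()    # nested {"name", "text", "is_empty_text", "subsections"} dict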
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


# pytest discovers these hooks by name, so they must be called pytest_addoption
# and pytest_terminal_summary for this conftest to have any effect.
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
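# Usage note for the conftest above: running `pytest --make-reports=<id>` routes
# the terminal summary through pytest_terminal_summary_main and writes per-run
# report files; without the flag both hooks reduce to the shared option setup.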
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int: __magic_name__ : str = parent __magic_name__ : List[Any] = 13 __magic_name__ : Union[str, Any] = 7 __magic_name__ : Tuple = True __magic_name__ : Dict = True __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = True __magic_name__ : int = 99 __magic_name__ : List[str] = 384 __magic_name__ : Optional[int] = 2 __magic_name__ : List[Any] = 4 __magic_name__ : int = 37 __magic_name__ : Union[str, Any] = 'gelu' __magic_name__ : Optional[int] = 0.1 __magic_name__ : str = 0.1 __magic_name__ : Optional[Any] = 512 __magic_name__ : Any = 16 __magic_name__ : Union[str, Any] = 2 __magic_name__ : Any = 0.02 __magic_name__ : List[str] = 3 __magic_name__ : Tuple = 4 __magic_name__ : List[Any] = 128 __magic_name__ : Optional[Any] = 2 __magic_name__ : List[str] = 9 __magic_name__ : str = 1 __magic_name__ : List[str] = None def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : List[str] = None if self.use_token_type_ids: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None __magic_name__ : int = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A 
: Tuple , _A : int , _A : Union[str, Any] ) -> Any: __magic_name__ : Dict = TFConvBertModel(config=_A ) __magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __magic_name__ : Any = [input_ids, input_mask] __magic_name__ : Tuple = model(_A ) __magic_name__ : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]: __magic_name__ : Dict = TFConvBertForMaskedLM(config=_A ) __magic_name__ : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple: __magic_name__ : Any = self.num_labels __magic_name__ : str = TFConvBertForSequenceClassification(config=_A ) __magic_name__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.num_choices __magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) __magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) __magic_name__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int: __magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A ) __magic_name__ : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = config_and_inputs __magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : List[str] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : Tuple = False A_ : Any = False A_ : List[Any] = False def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Optional[Any] = TFConvBertModelTester(self ) __magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : str ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> Any: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True __magic_name__ : Any = True if hasattr(_A , 'use_cache' ): __magic_name__ : List[Any] = True __magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A ) for model_class in self.all_model_classes: __magic_name__ : List[str] = self._prepare_for_class(_A , _A ) __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Tuple = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) __magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' ) __magic_name__ : Optional[int] = tf.keras.models.load_model(_A ) __magic_name__ : Optional[Any] = model(_A ) if self.is_encoder_decoder: __magic_name__ : Optional[int] = outputs['encoder_hidden_states'] __magic_name__ : Tuple = outputs['encoder_attentions'] 
else: __magic_name__ : Union[str, Any] = outputs['hidden_states'] __magic_name__ : Optional[Any] = outputs['attentions'] self.assertEqual(len(_A ) , _A ) __magic_name__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : str = True __magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A ) __magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A ) def check_decoder_attentions_output(_A : List[Any] ): __magic_name__ : Tuple = len(_A ) self.assertEqual(out_len % 2 , 0 ) __magic_name__ : Any = outputs.decoder_attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_A : int ): __magic_name__ : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = False __magic_name__ : List[str] = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) __magic_name__ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: __magic_name__ : Any = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Optional[int] = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : str = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : str = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) ) self.assertEqual(model.config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) @require_tf class _lowerCamelCase ( 
unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Tuple = model(_A )[0] __magic_name__ : str = [1, 6, 768] self.assertEqual(output.shape , _A ) __magic_name__ : Tuple = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
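# Standalone version of the integration check above, runnable outside the test
# harness (downloads the public YituTech checkpoint):
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
hidden = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
print(hidden.shape)  # (1, 6, 768), matching the expected shape in the test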
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule __UpperCamelCase : Optional[Any] = {'''tokenization_bertweet''': ['''BertweetTokenizer''']} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
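# Hedged usage of the lazily exported tokenizer above; "vinai/bertweet-base" is
# the reference checkpoint (an assumption, not stated in the module), and
# BERTweet applies its own tweet normalization before BPE when enabled.
from transformers import BertweetTokenizer

tweet_tokenizer = BertweetTokenizer.from_pretrained("vinai/bertweet-base", normalization=True)
print(tweet_tokenizer.tokenize("SC has first two presumptive cases of coronavirus :)"))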
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
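# Minimal sketch of the datasets FAISS API the tests above exercise (requires
# faiss to be installed, mirroring the @require_faiss marker):
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"filename": [f"doc_{i}" for i in range(10)]})
ds = ds.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
ds.add_faiss_index(column="vecs")
scores, examples = ds.get_nearest_examples("vecs", np.ones(5, dtype=np.float32), k=1)
print(examples["filename"])  # the row whose vector best matches the query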
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    # wraps a CLIP image processor and a CLIP tokenizer into a single processor
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
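# Hedged usage sketch for the processor above; "openai/clip-vit-base-patch32" is
# the canonical public checkpoint (an assumption, not referenced by the module):
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(
    text=["a photo of a cat"],
    images=Image.new("RGB", (224, 224)),  # stand-in for a real image
    return_tensors="pt",
)
# -> BatchEncoding with input_ids, attention_mask and pixel_values, ready for CLIPModel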
107
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
331
0
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCAmelCase__ = get_logger(__name__) class SCREAMING_SNAKE_CASE__ : """simple docstring""" a : Optional[Any] ="dummy_data" a : int ="datasets" a : Tuple =False def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = False , snake_case__ = True , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : Tuple = 0 lowerCAmelCase : int = dataset_name lowerCAmelCase : List[Any] = cache_dir lowerCAmelCase : List[str] = use_local_dummy_data lowerCAmelCase : List[str] = config # download_callbacks take a single url as input lowerCAmelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root lowerCAmelCase : Tuple = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general lowerCAmelCase : Union[str, Any] = str(snake_case__ ) # to be downloaded lowerCAmelCase : List[Any] = None lowerCAmelCase : List[Any] = None @property def lowercase__ ( self ): """simple docstring""" if self._dummy_file is None: lowerCAmelCase : Any = self.download_dummy_data() return self._dummy_file @property def lowercase__ ( self ): """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def lowercase__ ( self ): """simple docstring""" return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Optional[Any] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) lowerCAmelCase : str = cached_path( snake_case__ , cache_dir=self.cache_dir , extract_compressed_file=snake_case__ , force_extract=snake_case__ ) return os.path.join(snake_case__ , self.dummy_file_name ) @property def lowercase__ ( self ): """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowercase__ ( self ): """simple docstring""" if self._bucket_url is None: lowerCAmelCase : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def lowercase__ ( self ): """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def lowercase__ ( self , snake_case__ , *snake_case__ ): """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested lowerCAmelCase : int = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned lowerCAmelCase : List[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(snake_case__ , snake_case__ ): return self.create_dummy_data_dict(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , (list, tuple) ): return self.create_dummy_data_list(snake_case__ , snake_case__ ) else: return self.create_dummy_data_single(snake_case__ , snake_case__ ) def lowercase__ ( self , snake_case__ , *snake_case__ ): """simple docstring""" return self.download_and_extract(snake_case__ ) def lowercase__ ( self , snake_case__ , snake_case__ ): """simple docstring""" return self.download_and_extract(snake_case__ ) def lowercase__ ( self , snake_case__ , *snake_case__ , **snake_case__ ): """simple docstring""" return path def lowercase__ ( self ): """simple docstring""" return {} def lowercase__ ( self , snake_case__ , snake_case__ ): """simple docstring""" lowerCAmelCase : List[Any] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(snake_case__ , snake_case__ ): for single_url in single_urls: download_callback(snake_case__ ) else: lowerCAmelCase : List[str] = single_urls download_callback(snake_case__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(snake_case__ , snake_case__ ): lowerCAmelCase : Tuple = [os.path.join(snake_case__ , urllib.parse.quote_plus(Path(snake_case__ ).name ) ) for x in single_urls] else: lowerCAmelCase : int = single_urls lowerCAmelCase : Any = os.path.join(snake_case__ , urllib.parse.quote_plus(Path(snake_case__ ).name ) ) lowerCAmelCase : Union[str, Any] = value # make sure that values are unique if all(isinstance(snake_case__ , snake_case__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique lowerCAmelCase : Union[str, Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowercase__ ( self , snake_case__ , snake_case__ ): """simple docstring""" lowerCAmelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one lowerCAmelCase : Optional[Any] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , snake_case__ ) ) for url in data_url ) lowerCAmelCase : Any = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): lowerCAmelCase : int = [data_url[0]] * len(snake_case__ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(snake_case__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowerCAmelCase : Dict = os.path.join(snake_case__ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(snake_case__ ) return dummy_data_list def lowercase__ ( self , snake_case__ , snake_case__ ): """simple docstring""" for download_callback in self.download_callbacks: 
download_callback(snake_case__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowerCAmelCase : Tuple = os.path.join(snake_case__ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(snake_case__ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self , snake_case__ ): """simple docstring""" def _iter_archive_members(snake_case__ ): # this preserves the order of the members inside the ZIP archive lowerCAmelCase : str = Path(self.dummy_file ).parent lowerCAmelCase : Optional[Any] = path.relative_to(snake_case__ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: lowerCAmelCase : List[Any] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(snake_case__ ) lowerCAmelCase : List[Any] = Path(snake_case__ ) lowerCAmelCase : str = _iter_archive_members(snake_case__ ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(snake_case__ ).as_posix(), file_path.open("rb" ) def lowercase__ ( self , snake_case__ ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): lowerCAmelCase : List[Any] = [paths] for path in paths: if os.path.isfile(snake_case__ ): if os.path.basename(snake_case__ ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(snake_case__ ): if os.path.basename(snake_case__ ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(snake_case__ ): if filename.startswith((".", "__") ): continue yield os.path.join(snake_case__ , snake_case__ )
108
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
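A quick sanity check of the recursion above; the value for 200p is the published Project Euler #31 answer, quoted as an external fact.

# Minimal checks for the coin-sum recursion.
assert solution(0) == 1        # only the empty sum
assert solution(5) == 4        # 1+1+1+1+1, 1+1+1+2, 1+2+2, 5
assert solution(200) == 73682  # published Project Euler #31 answer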
331
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) UpperCAmelCase : Any = { """input_ids""": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } UpperCAmelCase : str = model(_SCREAMING_SNAKE_CASE )["""last_hidden_state"""] UpperCAmelCase : List[Any] = tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. UpperCAmelCase : Optional[int] = tf.convert_to_tensor( [ [ [0.068_1762, 0.1089_4451, 0.0677_2504], [-0.0642_3668, 0.0236_6615, 0.0432_9344], [-0.0605_7295, 0.0997_4135, -0.0007_0584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
109
from ..utils import DummyObject, requires_backends


class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
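A minimal, self-contained sketch of the dummy-object pattern used above; the class names here are hypothetical, and the real DummyObject/requires_backends live in the library's utils module.

class DummyObject(type):
    """Metaclass so that even classmethod access on the class raises a helpful error."""

    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires backends: {cls._backends}")


class NeedsFlax(metaclass=DummyObject):  # hypothetical placeholder class
    _backends = ["flax", "transformers"]


try:
    NeedsFlax.from_pretrained("some/checkpoint")
except ImportError as err:
    print(err)  # -> NeedsFlax requires backends: ['flax', 'transformers']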
331
0
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE_ ( lowercase__ ): """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__="None" , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ): __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = relative_attention __SCREAMING_SNAKE_CASE = position_biased_input __SCREAMING_SNAKE_CASE = pos_att_type __SCREAMING_SNAKE_CASE = scope def snake_case_ ( self): __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_ ( self): return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 3_0_0 return config def snake_case_ ( self , lowerCAmelCase__): self.parent.assertListEqual(list(result.loss.size()) , []) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = DebertaModel(config=_A) model.to(_A) model.eval() __SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A)[0] __SCREAMING_SNAKE_CASE = model(_A , token_type_ids=_A)[0] __SCREAMING_SNAKE_CASE = model(_A)[0] self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size]) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=_A) model.to(_A) model.eval() __SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(_A) model.to(_A) model.eval() __SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A) self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels]) self.check_loss_output(_A) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=_A) model.to(_A) model.eval() __SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=_A) model.to(_A) model.eval() __SCREAMING_SNAKE_CASE = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( __SCREAMING_SNAKE_CASE ) = config_and_inputs __SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" __lowercase : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, 
DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __lowercase : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) __lowercase : Union[str, Any] = True __lowercase : Any = False __lowercase : Dict = False __lowercase : str = False __lowercase : Dict = False def snake_case_ ( self): __SCREAMING_SNAKE_CASE = DebertaModelTester(self) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A , hidden_size=3_7) def snake_case_ ( self): self.config_tester.run_common_tests() def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A) @slow def snake_case_ ( self): for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(_A) self.assertIsNotNone(_A) @require_torch @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""") def snake_case_ ( self): pass @slow def snake_case_ ( self): __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""") __SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]]) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A)[0] # compare the actual values for a slice. __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4) , f"{output[:, 1:4, 1:4]}")
100
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
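A short usage sketch of the pipeline above; the checkpoint name, image URL, and printed output are illustrative assumptions taken to be typical, not values from the code above.

from transformers import pipeline

# "Salesforce/blip-image-captioning-base" is an assumed example checkpoint.
captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# e.g. [{'generated_text': 'two birds are standing next to each other '}]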
331
0
"""simple docstring""" class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : list): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = set_counts SCREAMING_SNAKE_CASE_ : Tuple = max(_A) SCREAMING_SNAKE_CASE_ : List[str] = len(_A) SCREAMING_SNAKE_CASE_ : Optional[Any] = [1] * num_sets SCREAMING_SNAKE_CASE_ : Tuple = list(range(_A)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : int , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.get_parent(_A) SCREAMING_SNAKE_CASE_ : int = self.get_parent(_A) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 SCREAMING_SNAKE_CASE_ : List[Any] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 SCREAMING_SNAKE_CASE_ : Any = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 SCREAMING_SNAKE_CASE_ : Tuple = src_parent SCREAMING_SNAKE_CASE_ : List[str] = self.set_counts[src_parent] SCREAMING_SNAKE_CASE_ : List[str] = max(self.max_set , _A) return True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : int): '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_parent(self.parents[disj_set]) return self.parents[disj_set]
91
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info

from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"


class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")


class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))


def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
331
0
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Format tuple (major, minor, patch) as a version string."""
    return ".".join(str(v) for v in version_tuple)
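A small usage sketch for the Version class above, showing how string operands are coerced before comparison.

v = Version("1.0.0")
print(repr(v))       # 1.0.0
print(v.tuple)       # (1, 0, 0)
print(v == "1.0.0")  # True: strings are coerced through _validate_operand
print(v < "1.2.0")   # True: total_ordering derives the rest from __lt__/__eq__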
187
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
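A minimal denoising-loop sketch with the scheduler tested above; the zero "model output" is a stand-in assumption for a real UNet prediction, so the result is not a meaningful image.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 8, 8)  # start from pure noise
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for a real noise prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])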
331
0
def combination_util(arr, n, r, index, data, i):
    """
    arr[]  ---> Input array
    data[] ---> Temporary array to store the current combination
    index  ---> Current index in data[]
    r      ---> Size of a combination to be printed
    """
    if index == r:
        # Current combination is ready to be printed, print it
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
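A smaller worked example of the include/exclude recursion above: all 2-element subsets of a 3-element array, printed in lexicographic order.

print_combination([1, 2, 3], 3, 2)
# 1 2
# 1 3
# 2 3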
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase__ : List[Any] = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : List[str] = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : int = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, 
FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
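
# A minimal sketch of the lazy-import pattern used by the module above — not
# the actual `_LazyModule` from transformers, just the core idea: map each
# exported name to its submodule and defer the heavy framework import until
# that name is first accessed.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)
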
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
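
# The same ten-digit truncation on an inline example (sanity sketch, no file
# required):
#   str(sum([10**12, 2 * 10**12]))[:10]  ->  "3000000000"
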
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
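
# Usage sketch for the class above: the classic 5x5 example grid, which
# contains 5 islands under the 8-way connectivity used by `diffs`.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Graph(5, 5, grid).count_islands())  # -> 5
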
'''simple docstring''' import argparse import struct import unittest class lowercase__ : def __init__( self : int ,lowerCamelCase__ : bytes ): '''simple docstring''' _UpperCamelCase : Dict = data # Initialize hash values _UpperCamelCase : Optional[int] = [ 0x6a09_e667, 0xbb67_ae85, 0x3c6e_f372, 0xa54f_f53a, 0x510e_527f, 0x9b05_688c, 0x1f83_d9ab, 0x5be0_cd19, ] # Initialize round constants _UpperCamelCase : Dict = [ 0x428a_2f98, 0x7137_4491, 0xb5c0_fbcf, 0xe9b5_dba5, 0x3956_c25b, 0x59f1_11f1, 0x923f_82a4, 0xab1c_5ed5, 0xd807_aa98, 0x1283_5b01, 0x2431_85be, 0x550c_7dc3, 0x72be_5d74, 0x80de_b1fe, 0x9bdc_06a7, 0xc19b_f174, 0xe49b_69c1, 0xefbe_4786, 0x0fc1_9dc6, 0x240c_a1cc, 0x2de9_2c6f, 0x4a74_84aa, 0x5cb0_a9dc, 0x76f9_88da, 0x983e_5152, 0xa831_c66d, 0xb003_27c8, 0xbf59_7fc7, 0xc6e0_0bf3, 0xd5a7_9147, 0x06ca_6351, 0x1429_2967, 0x27b7_0a85, 0x2e1b_2138, 0x4d2c_6dfc, 0x5338_0d13, 0x650a_7354, 0x766a_0abb, 0x81c2_c92e, 0x9272_2c85, 0xa2bf_e8a1, 0xa81a_664b, 0xc24b_8b70, 0xc76c_51a3, 0xd192_e819, 0xd699_0624, 0xf40e_3585, 0x106a_a070, 0x19a4_c116, 0x1e37_6c08, 0x2748_774c, 0x34b0_bcb5, 0x391c_0cb3, 0x4ed8_aa4a, 0x5b9c_ca4f, 0x682e_6ff3, 0x748f_82ee, 0x78a5_636f, 0x84c8_7814, 0x8cc7_0208, 0x90be_fffa, 0xa450_6ceb, 0xbef9_a3f7, 0xc671_78f2, ] _UpperCamelCase : str = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( lowerCamelCase__ : bytes ): '''simple docstring''' _UpperCamelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64)) _UpperCamelCase : Optional[int] = struct.pack('>Q' ,(len(_A ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' # Convert into blocks of 64 bytes _UpperCamelCase : str = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers _UpperCamelCase : Optional[int] = list(struct.unpack('>16L' ,_A ) ) # add 48 0-ed integers words += [0] * 48 _UpperCamelCase : Optional[int] = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array _UpperCamelCase : Optional[int] = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) _UpperCamelCase : Any = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) _UpperCamelCase : List[Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0000_0000 # Compression _UpperCamelCase : List[str] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 ) _UpperCamelCase : Tuple = (e & f) ^ ((~e & 0xffff_ffff) & g) _UpperCamelCase : Optional[int] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0000_0000 _UpperCamelCase : Tuple = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 ) _UpperCamelCase : Optional[Any] = (a & b) ^ (a & c) ^ (b & c) _UpperCamelCase : Optional[int] = (sa + maj) % 0x1_0000_0000 _UpperCamelCase : str = ( g, f, e, ((d + tempa) % 0x1_0000_0000), c, b, a, ((tempa + tempa) % 0x1_0000_0000), ) _UpperCamelCase : Tuple = [a, b, c, d, e, f, g, h] # Modify final values _UpperCamelCase : List[str] = [ ((element + mutated_hash_values[index]) % 0x1_0000_0000) for index, element in enumerate(self.hashes ) ] _UpperCamelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int ): '''simple docstring''' return 0xffff_ffff & (value << (32 - rotations)) | 
(value >> rotations) class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' import hashlib _UpperCamelCase : int = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() _UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) _UpperCamelCase : Tuple = parser.parse_args() _UpperCamelCase : Optional[Any] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: _UpperCamelCase : List[Any] = f.read() else: _UpperCamelCase : Dict = bytes(UpperCAmelCase_ , 'utf-8' ) print(SHAaaa(UpperCAmelCase_ ).hash ) if __name__ == "__main__": main()
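
# The padding rule from `preprocessing` above, in isolation: append 0x80, then
# zero bytes, then the 64-bit big-endian bit length, so the total length is a
# multiple of 64 bytes (a sanity sketch of that one step, verifiable against
# any SHA-256 reference).
import struct


def sha256_pad(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    big_endian_length = struct.pack(">Q", len(data) * 8)
    return data + padding + big_endian_length


assert len(sha256_pad(b"abc")) % 64 == 0
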
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __lowerCAmelCase : Optional[int] = logging.get_logger(__name__) def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: __lowercase : Optional[Any] = set() __lowercase : List[Any] = [] def parse_line(__lowerCAmelCase ): for line in fp: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowercase : Union[str, Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(__lowerCAmelCase ) > 0: __lowercase : int = '\n'.join(__lowerCAmelCase ) # Only keep the warnings specified in `targets` if any(F': {x}: ' in warning for x in targets ): selected_warnings.add(__lowerCAmelCase ) buffer.clear() continue else: __lowercase : Optional[int] = line.strip() buffer.append(__lowerCAmelCase ) if from_gh: for filename in os.listdir(__lowerCAmelCase ): __lowercase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not os.path.isdir(__lowerCAmelCase ): # read the file if filename != "warnings.txt": continue with open(__lowerCAmelCase ) as fp: parse_line(__lowerCAmelCase ) else: try: with zipfile.ZipFile(__lowerCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(__lowerCAmelCase ): # read the file if filename != "warnings.txt": continue with z.open(__lowerCAmelCase ) as fp: parse_line(__lowerCAmelCase ) except Exception: logger.warning( F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: __lowercase : List[Any] = set() __lowercase : Dict = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for p in os.listdir(__lowerCAmelCase ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(__lowerCAmelCase , __lowerCAmelCase ) ) return selected_warnings if __name__ == "__main__": def UpperCAmelCase_ ( __lowerCAmelCase ) -> str: return values.split(''',''' ) __lowerCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") # optional parameters parser.add_argument( "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str, help="Comma-separated list of target warning(s) which we want to extract.", ) parser.add_argument( "--from_gh", action="store_true", help="If running from a GitHub action workflow and collecting warnings from its artifacts.", ) __lowerCAmelCase : Optional[Any] = parser.parse_args() __lowerCAmelCase : Tuple = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __lowerCAmelCase : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) 
# download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("=" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __lowerCAmelCase : int = extract_warnings(args.output_dir, args.targets) __lowerCAmelCase : Optional[Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
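
# Example invocation (the flags match the argparse definition above; the
# script name, run id, and token below are placeholders, not real values):
#
#   python extract_warnings.py \
#       --workflow_run_id <RUN_ID> \
#       --output_dir ./warnings_out \
#       --token <GITHUB_TOKEN> \
#       --targets DeprecationWarning,UserWarning,FutureWarning
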
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
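
# Cross-check of the identity the search above relies on: the number of
# axis-aligned rectangles in an a x b grid is C(a+1, 2) * C(b+1, 2), i.e. the
# product of the a-th and b-th triangle numbers (a sanity sketch, not part of
# the solution).
def rectangles(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)


assert rectangles(2, 3) == 18  # the worked example from Project Euler 85
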
'''simple docstring''' from ...processing_utils import ProcessorMixin class lowerCAmelCase_ ( lowercase__ ): __lowerCamelCase : Union[str, Any] = """SpeechT5FeatureExtractor""" __lowerCamelCase : Tuple = """SpeechT5Tokenizer""" def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> int: super().__init__(_A , _A ) def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = kwargs.pop("audio" , _A ) _lowerCAmelCase = kwargs.pop("text" , _A ) _lowerCAmelCase = kwargs.pop("text_target" , _A ) _lowerCAmelCase = kwargs.pop("audio_target" , _A ) _lowerCAmelCase = kwargs.pop("sampling_rate" , _A ) if audio is not None and text is not None: raise ValueError( "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" ) if audio_target is not None and text_target is not None: raise ValueError( "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." ) if audio is not None: _lowerCAmelCase = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A ) elif text is not None: _lowerCAmelCase = self.tokenizer(_A , **_A ) else: _lowerCAmelCase = None if audio_target is not None: _lowerCAmelCase = self.feature_extractor(audio_target=_A , *_A , sampling_rate=_A , **_A ) _lowerCAmelCase = targets['input_values'] elif text_target is not None: _lowerCAmelCase = self.tokenizer(_A , **_A ) _lowerCAmelCase = targets['input_ids'] else: _lowerCAmelCase = None if inputs is None: return targets if targets is not None: _lowerCAmelCase = labels _lowerCAmelCase = targets.get("attention_mask" ) if decoder_attention_mask is not None: _lowerCAmelCase = decoder_attention_mask return inputs def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = kwargs.pop("input_values" , _A ) _lowerCAmelCase = kwargs.pop("input_ids" , _A ) _lowerCAmelCase = kwargs.pop("labels" , _A ) if input_values is not None and input_ids is not None: raise ValueError("Cannot process both `input_values` and `input_ids` inputs." ) if input_values is None and input_ids is None and labels is None: raise ValueError( "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." 
) if input_values is not None: _lowerCAmelCase = self.feature_extractor.pad(_A , *_A , **_A ) elif input_ids is not None: _lowerCAmelCase = self.tokenizer.pad(_A , **_A ) else: _lowerCAmelCase = None if labels is not None: if "input_ids" in labels or (isinstance(_A , _A ) and "input_ids" in labels[0]): _lowerCAmelCase = self.tokenizer.pad(_A , **_A ) _lowerCAmelCase = targets['input_ids'] else: _lowerCAmelCase = self.feature_extractor.feature_size _lowerCAmelCase = self.feature_extractor.num_mel_bins _lowerCAmelCase = self.feature_extractor.pad(_A , *_A , **_A ) _lowerCAmelCase = feature_size_hack _lowerCAmelCase = targets['input_values'] else: _lowerCAmelCase = None if inputs is None: return targets if targets is not None: _lowerCAmelCase = labels _lowerCAmelCase = targets.get("attention_mask" ) if decoder_attention_mask is not None: _lowerCAmelCase = decoder_attention_mask return inputs def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: return self.tokenizer.batch_decode(*_A , **_A ) def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: return self.tokenizer.decode(*_A , **_A )
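
# A minimal usage sketch (assumption: the standard "microsoft/speecht5_tts"
# checkpoint; per the dispatch logic above, `processor(text=...)` tokenizes
# and `processor(audio=..., sampling_rate=...)` extracts features, and the
# two inputs are mutually exclusive):
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")
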
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _A ( unittest.TestCase): def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" self.assertEqual(len(_A ) , len(_A ) ) for a, b in zip(_A , _A ): self.assertAlmostEqual(_A , _A , delta=_A ) def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(_A ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = None ops.enable_eager_execution_internal() SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.config.list_physical_devices('CPU' ) if len(_A ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.config.list_logical_devices(device_type='CPU' ) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): SCREAMING_SNAKE_CASE_ : Dict = GradientAccumulator() SCREAMING_SNAKE_CASE_ : int = tf.Variable([4.0, 3.0] ) SCREAMING_SNAKE_CASE_ : str = create_optimizer(5e-5 , 10 , 5 ) SCREAMING_SNAKE_CASE_ : Tuple = tf.Variable([0.0, 0.0] , trainable=_A ) def accumulate_on_replica(_SCREAMING_SNAKE_CASE ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): with strategy.scope(): SCREAMING_SNAKE_CASE_ : Optional[Any] = strategy.experimental_local_results(_A ) local_variables[0].assign(_A ) local_variables[1].assign(_A ) strategy.run(_A , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(_A ) def _check_local_values(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ : List[str] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , _A , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , _A , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
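
# A pure-Python sketch of the accumulator contract the test above exercises
# (an illustration, not the transformers `GradientAccumulator`; assumption:
# gradients arrive as lists of equal-length float vectors, `step` counts
# calls, and `reset` zeroes the sums without changing their shape).
class SimpleAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        if self.gradients is None:
            # the first call fixes the number and shape of the gradient slots
            self.gradients = [list(g) for g in grads]
        elif len(grads) != len(self.gradients):
            raise ValueError(
                "expected %d gradients, got %d" % (len(self.gradients), len(grads))
            )
        else:
            for acc, g in zip(self.gradients, grads):
                for k, v in enumerate(g):
                    acc[k] += v
        self.step += 1

    def reset(self):
        self.step = 0
        if self.gradients is not None:
            self.gradients = [[0.0] * len(g) for g in self.gradients]
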
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Tuple = ["""pixel_values"""] def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384} __magic_name__ : Dict = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[Any] = do_resize __magic_name__ : str = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : int = resample __magic_name__ : List[str] = do_rescale __magic_name__ : List[Any] = rescale_factor __magic_name__ : str = do_normalize __magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) __magic_name__ : Dict = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : Dict = int(shortest_edge / crop_pct ) __magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Optional[Any] = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : str = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Dict = size if size is not None else self.size __magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
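
# The resize rule above in isolation (assumption: the requested
# size["shortest_edge"] is the target S, and crop_pct defaults to 224/256):
# targets below 384 are reached by resizing the short side to S / crop_pct
# and then center-cropping to S x S; targets of 384 or more are warped
# directly to a square.
def resize_plan(target_edge: int, crop_pct: float = 224 / 256) -> dict:
    if target_edge < 384:
        return {
            "resize_short_side_to": int(target_edge / crop_pct),
            "center_crop_to": target_edge,
        }
    return {"warp_to": (target_edge, target_edge)}
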
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig A : int = logging.get_logger(__name__) A : Dict = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class __A( lowercase__ ): snake_case_ = """dpt""" def __init__( self , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3_072 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=384 , _snake_case=16 , _snake_case=3 , _snake_case=False , _snake_case=True , _snake_case=[2, 5, 8, 11] , _snake_case="project" , _snake_case=[4, 2, 1, 0.5] , _snake_case=[96, 192, 384, 768] , _snake_case=256 , _snake_case=-1 , _snake_case=False , _snake_case=True , _snake_case=0.4 , _snake_case=255 , _snake_case=0.1 , _snake_case=[1, 1_024, 24, 24] , _snake_case=[0, 1] , _snake_case=None , **_snake_case , ) -> Tuple: '''simple docstring''' super().__init__(**_A ) __a = hidden_size __a = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) __a = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } __a = BitConfig(**_A ) elif isinstance(_A , _A ): logger.info('''Initializing the config with a `BiT` backbone.''' ) __a = BitConfig(**_A ) elif isinstance(_A , _A ): __a = backbone_config else: raise ValueError( F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" ) __a = backbone_featmap_shape __a = neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: __a = None __a = None __a = [] __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = initializer_range __a = layer_norm_eps __a = image_size __a = patch_size __a = num_channels __a = qkv_bias __a = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) __a = readout_type __a = reassemble_factors __a = neck_hidden_sizes __a = fusion_hidden_size __a = head_in_index __a = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) __a = use_auxiliary_head __a = auxiliary_loss_weight __a = semantic_loss_ignore_index __a = semantic_classifier_dropout def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
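
# Minimal usage sketch (assumption: the class above mirrors the public
# `transformers.DPTConfig`, so hybrid mode synthesizes a BiT backbone config
# when none is supplied):
#   config = DPTConfig(is_hybrid=True)
#   assert config.backbone_config.layer_type == "bottleneck"
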
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
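
# Usage sketch (illustrative numbers only): pass 0 for the unknown quantity
# and the function returns it, solved from the Casimir relation.
#   casimir_force(force=0, area=1e-4, distance=1e-6)    ->  {"force": ...}
#   casimir_force(force=2.6e-7, area=0, distance=1e-6)  ->  {"area": ...}
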
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class SCREAMING_SNAKE_CASE_ : """simple docstring""" __lowercase : Dict = MBartConfig __lowercase : Any = {} __lowercase : str = """gelu""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=2_0 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , ): __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def snake_case_ ( self): __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(_A , _A , _A) return config, inputs_dict def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = TFMBartModel(config=_A).get_decoder() __SCREAMING_SNAKE_CASE = inputs_dict['input_ids'] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['attention_mask'][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['head_mask'] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A) __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , 
UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ): if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCamelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class SCREAMING_SNAKE_CASE_ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" __lowercase : Optional[int] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () __lowercase : Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else () __lowercase : List[str] = ( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) __lowercase : Tuple = True __lowercase : str = False __lowercase : Dict = False def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def snake_case_ ( self): __SCREAMING_SNAKE_CASE = TFMBartModelTester(self) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A) def snake_case_ ( self): self.config_tester.run_common_tests() def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_A) @require_sentencepiece @require_tokenizers @require_tf class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" __lowercase : Optional[int] = [ """ UN Chief Says There Is No Military Solution in Syria""", ] __lowercase : List[str] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] __lowercase : int = """facebook/mbart-large-en-ro""" @cached_property def snake_case_ ( self): return AutoTokenizer.from_pretrained(self.model_name) @cached_property def snake_case_ ( self): __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def snake_case_ ( self , **lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.translate_src_text(**_A) self.assertListEqual(self.expected_text , _A) def snake_case_ ( self , **lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **_A , return_tensors="""tf""") __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(_A , skip_special_tokens=_A) return generated_words @slow def snake_case_ ( self): self._assert_generated_batch_equal_expected()
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase :Tuple = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase :List[Any] = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), 
('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase :str = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase :Optional[Any] = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase :Union[str, Any] = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase :Tuple = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) ) __magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase ( lowerCAmelCase : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(lowerCAmelCase )) @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : Any = PokerHand(lowerCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ): """simple docstring""" assert PokerHand(lowerCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ): """simple docstring""" assert PokerHand(lowerCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ): """simple docstring""" assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS] __magic_name__ : 
Tuple = poker_hands.copy() shuffle(lowerCAmelCase ) __magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) ) for index, hand in enumerate(lowerCAmelCase ): assert hand == poker_hands[index] def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowerCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' ) __magic_name__ : Optional[Any] = True __magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Dict = 0 __magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' ) with open(lowerCAmelCase ) as file_hand: for line in file_hand: __magic_name__ : Optional[int] = line[:14].strip() __magic_name__ : List[Any] = line[15:].strip() __magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase ) __magic_name__ : List[Any] = player.compare_with(lowerCAmelCase ) if output == "Win": answer += 1 assert answer == 376
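# The random fixtures above rank outcomes with
# ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]: the two boolean
# comparisons sum to 0 (play < oppo), 1 (tie), or 2 (play > oppo). A quick check:
for play, oppo in [(0, 5), (3, 3), (7, 2)]:
    print(["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)])
# -> Loss, Tie, Win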
"""simple docstring""" from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _A (__a ) -> Dict: """simple docstring""" if isinstance(__a , collections.abc.Iterable ): return x return (x, x) @require_tf class lowerCAmelCase__ : '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : str , lowercase_ : Any): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple=None , **lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A) SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFVisionTextDualEncoderModel(_A) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim)) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim)) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Any=None , **lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.get_vision_text_model(_A , _A) SCREAMING_SNAKE_CASE_ : List[Any] = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A) SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=_A , pixel_values=_A , attention_mask=_A) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim)) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any]=None , **lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.get_vision_text_model(_A , _A) SCREAMING_SNAKE_CASE_ : Optional[int] = {'vision_model': vision_model, 'text_model': text_model} SCREAMING_SNAKE_CASE_ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A) SCREAMING_SNAKE_CASE_ : Optional[int] = model(input_ids=_A , pixel_values=_A , attention_mask=_A) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim)) def 
_SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Union[str, Any]=None , **lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_vision_text_model(_A , _A) SCREAMING_SNAKE_CASE_ : int = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A) SCREAMING_SNAKE_CASE_ : str = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A) SCREAMING_SNAKE_CASE_ : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(_A) SCREAMING_SNAKE_CASE_ : Tuple = model(input_ids=_A , pixel_values=_A , attention_mask=_A) SCREAMING_SNAKE_CASE_ : int = after_output[0].numpy() SCREAMING_SNAKE_CASE_ : Tuple = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(_A , 1e-5) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=None , **lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_vision_text_model(_A , _A) SCREAMING_SNAKE_CASE_ : Any = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A) SCREAMING_SNAKE_CASE_ : Any = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A) SCREAMING_SNAKE_CASE_ : List[str] = output.vision_model_output.attentions self.assertEqual(len(_A) , vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_ : Tuple = to_atuple(vision_model.config.image_size) SCREAMING_SNAKE_CASE_ : Optional[Any] = to_atuple(vision_model.config.patch_size) SCREAMING_SNAKE_CASE_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) SCREAMING_SNAKE_CASE_ : List[str] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) SCREAMING_SNAKE_CASE_ : Any = output.text_model_output.attentions self.assertEqual(len(_A) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.abs((a - b)).max() self.assertLessEqual(_A , _A , F'Difference between torch and flax is {diff} (>= {tol}).') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_A) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs() self.check_save_load(**_A) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A) 
@slow def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.get_pretrained_model_and_inputs() SCREAMING_SNAKE_CASE_ : Tuple = model_a(**_A) SCREAMING_SNAKE_CASE_ : str = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A) SCREAMING_SNAKE_CASE_ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_A) SCREAMING_SNAKE_CASE_ : List[str] = model_a(**_A) SCREAMING_SNAKE_CASE_ : Any = after_outputs[0].numpy() SCREAMING_SNAKE_CASE_ : Tuple = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(_A , 1e-5) @require_tf class lowerCAmelCase__ ( lowercase__ , unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''') SCREAMING_SNAKE_CASE_ : List[str] = 13 SCREAMING_SNAKE_CASE_ : Optional[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) SCREAMING_SNAKE_CASE_ : Tuple = random_attention_mask([batch_size, 4]) SCREAMING_SNAKE_CASE_ : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = TFViTModel(_A , name='''vision_model''') SCREAMING_SNAKE_CASE_ : int = TFBertModel(_A , name='''text_model''') return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = TFViTModelTester(self) SCREAMING_SNAKE_CASE_ : str = TFBertModelTester(self) SCREAMING_SNAKE_CASE_ : int = vit_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ : List[Any] = bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ : str = vision_config_and_inputs ( SCREAMING_SNAKE_CASE_ ) : Optional[Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowerCAmelCase__ ( lowercase__ , unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''') SCREAMING_SNAKE_CASE_ : Any = 13 SCREAMING_SNAKE_CASE_ : List[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) SCREAMING_SNAKE_CASE_ : Optional[Any] = random_attention_mask([batch_size, 4]) SCREAMING_SNAKE_CASE_ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : 
Optional[Any] , lowercase_ : str=None , **lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_vision_text_model(_A , _A) SCREAMING_SNAKE_CASE_ : Dict = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A) SCREAMING_SNAKE_CASE_ : List[str] = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A) SCREAMING_SNAKE_CASE_ : List[str] = output.vision_model_output.attentions self.assertEqual(len(_A) , vision_config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) SCREAMING_SNAKE_CASE_ : str = to_atuple(vision_model.config.image_size) SCREAMING_SNAKE_CASE_ : Optional[Any] = to_atuple(vision_model.config.patch_size) SCREAMING_SNAKE_CASE_ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) SCREAMING_SNAKE_CASE_ : Dict = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) SCREAMING_SNAKE_CASE_ : Optional[Any] = output.text_model_output.attentions self.assertEqual(len(_A) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = TFDeiTModel(_A , name='''vision_model''') SCREAMING_SNAKE_CASE_ : str = TFRobertaModel(_A , name='''text_model''') return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = TFDeiTModelTester(self) SCREAMING_SNAKE_CASE_ : List[Any] = TFRobertaModelTester(self) SCREAMING_SNAKE_CASE_ : Any = vit_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ : Tuple = vision_config_and_inputs ( SCREAMING_SNAKE_CASE_ ) : List[Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowerCAmelCase__ ( lowercase__ , unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''') SCREAMING_SNAKE_CASE_ : Tuple = 13 SCREAMING_SNAKE_CASE_ : Optional[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([batch_size, 4]) SCREAMING_SNAKE_CASE_ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : List[str] , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = TFCLIPVisionModel(_A , name='''vision_model''') SCREAMING_SNAKE_CASE_ : Dict = TFBertModel(_A , name='''text_model''') return vision_model, text_model def 
_SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = TFCLIPVisionModelTester(self) SCREAMING_SNAKE_CASE_ : Dict = TFBertModelTester(self) SCREAMING_SNAKE_CASE_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ : int = vision_config_and_inputs ( SCREAMING_SNAKE_CASE_ ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = TFVisionTextDualEncoderModel.from_pretrained( '''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=_A) SCREAMING_SNAKE_CASE_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''') SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') SCREAMING_SNAKE_CASE_ : str = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=_A , padding=_A , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : int = model(**_A) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) SCREAMING_SNAKE_CASE_ : Optional[int] = np.array([[1.2_28_47_27, 0.3_10_41_22]]) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1e-3))
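# The attention-shape checks above rely on the ViT sequence length
# (patches + special tokens). A small sketch of that arithmetic; the helper
# name is ours, not part of the test module:
def vit_seq_len(image_size, patch_size, num_special_tokens=1):
    # ViT prepends 1 [CLS] token; DeiT prepends 2 ([CLS] + distillation).
    h, w = (image_size, image_size) if isinstance(image_size, int) else image_size
    ph, pw = (patch_size, patch_size) if isinstance(patch_size, int) else patch_size
    return (h // ph) * (w // pw) + num_special_tokens

print(vit_seq_len(224, 16))     # 197 for a standard ViT
print(vit_seq_len(224, 16, 2))  # 198 for DeiT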
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase :Union[str, Any] = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :str = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
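# A minimal sketch of the optional-dependency guard used in the lazy-module
# files above, with a hypothetical module entry (the try/except/else shape is
# the point, not the names):
from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

_import_structure = {}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch is missing: skip registering the torch-only classes
else:
    _import_structure["modeling_my_model"] = ["MyModel"]  # hypothetical entry
print(_import_structure)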
def or_gate(input_a: int, input_b: int) -> int:
    """Logical OR: 1 if at least one input is 1, else 0."""
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustive truth-table check."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
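# The same tuple-count trick extends to the other basic gates; a short sketch
# (these helpers are ours, not part of the module above):
def and_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(0) == 0)

def nor_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(1) == 0)

assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
assert nor_gate(0, 0) == 1 and nor_gate(1, 0) == 0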
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase :Any = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple: super().__init__(**_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]: __magic_name__ : str = {} if "candidate_labels" in kwargs: __magic_name__ : str = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: __magic_name__ : Tuple = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int: __magic_name__ : Dict = load_image(_A ) __magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) __magic_name__ : Optional[Any] = candidate_labels __magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels] __magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A ) __magic_name__ : Optional[Any] = [text_inputs] return inputs def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str: __magic_name__ : str = model_inputs.pop('candidate_labels' ) __magic_name__ : str = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , _A ): __magic_name__ : Dict = text_inputs[0] else: # Batching case. __magic_name__ : Optional[Any] = text_inputs[0][0] __magic_name__ : List[Any] = self.model(**_A , **_A ) __magic_name__ : str = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]: __magic_name__ : Tuple = model_outputs.pop('candidate_labels' ) __magic_name__ : Union[str, Any] = model_outputs['logits'][0] if self.framework == "pt": __magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 ) __magic_name__ : Tuple = probs.tolist() if not isinstance(_A , _A ): __magic_name__ : Any = [scores] elif self.framework == "tf": __magic_name__ : Any = stable_softmax(_A , axis=-1 ) __magic_name__ : Dict = probs.numpy().tolist() else: raise ValueError(F'Unsupported framework: {self.framework}' ) __magic_name__ : Union[str, Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] ) ] return result
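# A hedged usage sketch for the pipeline class above; the checkpoint and image
# path are placeholders, not fixed by this file:
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",  # assumption: any CLIP-style checkpoint works
)
result = classifier(
    "cat.png",  # hypothetical local image
    candidate_labels=["cat", "dog"],
    hypothesis_template="This is a photo of {}.",  # the default shown in preprocess()
)
print(result)  # e.g. [{'score': 0.99, 'label': 'cat'}, {'score': 0.01, 'label': 'dog'}]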
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _A = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase :int = '''pt''' elif is_tf_available(): lowerCAmelCase :Optional[Any] = '''tf''' else: lowerCAmelCase :Optional[Any] = '''jax''' class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = ByTaTokenizer A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: super().setUp() __magic_name__ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__ : Optional[Any] = [] for i in range(len(_A ) ): try: __magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) __magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __magic_name__ : Optional[int] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __magic_name__ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ : List[str] = [t[0] for t in toks] # Ensure consistency __magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __magic_name__ : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __magic_name__ : Union[str, Any] = ' ' + output_txt __magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def __lowerCAmelCase ( self : int ) -> str: __magic_name__ : Any = self.ta_base_tokenizer __magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) __magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : Optional[int] = self.ta_base_tokenizer __magic_name__ : Optional[int] = 'Unicode €.' 
__magic_name__ : Optional[Any] = tokenizer(_A ) __magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : Any = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) __magic_name__ : Any = tokenizer('e è é ê ë' ) __magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding __magic_name__ : List[str] = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __lowerCAmelCase ( self : Any ) -> int: __magic_name__ : List[Any] = self.ta_base_tokenizer __magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __magic_name__ : str = list(batch.input_ids.numpy()[0] ) else: __magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = self.ta_base_tokenizer __magic_name__ : Tuple = [ 'Summary of the text.', 'Another summary.', ] __magic_name__ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __magic_name__ : str = self.ta_base_tokenizer __magic_name__ : Any = ['A long paragraph for summarization. </s>'] __magic_name__ : List[str] = ['Summary of the text. 
</s>'] # fmt: off __magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __magic_name__ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def __lowerCAmelCase ( self : Any ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : str = tempfile.mkdtemp() __magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running' __magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ : Optional[Any] = tempfile.mkdtemp() __magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __magic_name__ : Any = tokenizer.__class__.from_pretrained(_A ) __magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: __magic_name__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Union[str, Any] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __magic_name__ : Optional[Any] = json.load(_A ) __magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )] __magic_name__ : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] __magic_name__ : 
Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ : str = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] __magic_name__ : Optional[Any] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: __magic_name__ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: pass def __lowerCAmelCase ( self : List[str] ) -> int: pass def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: pass def __lowerCAmelCase ( self : List[Any] ) -> int: pass def __lowerCAmelCase ( self : str ) -> Tuple: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] __magic_name__ : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__ : List[str] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] __magic_name__ : List[str] = 0 __magic_name__ : str = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , 
_A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
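# The expected ids in the tests above follow ByT5's byte+offset scheme: ids 0-2
# are reserved (pad/eos/unk), so each UTF-8 byte b maps to b + 3 and </s> is 1.
# A minimal re-derivation of the 'Unicode €.' fixture:
OFFSET, EOS_ID = 3, 1

def byt5_encode(text):
    return [b + OFFSET for b in text.encode("utf-8")] + [EOS_ID]

print(byt5_encode("Unicode €."))
# -> [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]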
'''simple docstring'''


class MaxFenwickTree:
    """Fenwick-style tree supporting point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] to value and refresh the nodes covering it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers exactly this index.
                self.tree[index] = value
            else:
                # Reconstructed from the garbled original; assumes values at an
                # index never decrease, so the running maximum stays valid.
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right] (right is exclusive)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
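# Quick usage sketch for the structure above (0-indexed updates; the query's
# right bound is exclusive):
tree = MaxFenwickTree(5)
tree.update(2, 7)
tree.update(4, 3)
print(tree.query(0, 5))  # 7 -- maximum over indices [0, 5)
print(tree.query(3, 5))  # 3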
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]] __magic_name__ : Dict = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def __lowerCAmelCase ( self : List[Any] ) -> Tuple: __magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]] __magic_name__ : List[Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 ) __magic_name__ : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 ) __magic_name__ : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 ) __magic_name__ : Any = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
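# A hedged walk-through of the stepping API exercised above (assumes
# transformers with torch available):
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(dc.completed, dc.current_seq)  # True [1, 2, 4]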
def pancake_sort(arr: list) -> list:
    """Sort the list with pancake sort (repeated prefix reversals)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum element in the unsorted prefix.
        mi = arr.index(max(arr[0:cur]))
        # Flip the maximum to the front of the prefix.
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip the whole prefix so the maximum lands at its final position.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
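# One iteration of the loop above on [3, 1, 4, 2]: the maximum (4, at index 2)
# is flipped to the front, then the whole active prefix is flipped to the back:
arr = [3, 1, 4, 2]
mi = arr.index(max(arr[0:4]))       # mi == 2
arr = arr[mi::-1] + arr[mi + 1 :]   # [4, 1, 3, 2]
arr = arr[3::-1] + arr[4:]          # [2, 3, 1, 4] -- 4 is now in its final place
print(arr)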
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowerCAmelCase :List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) lowerCAmelCase :List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Union[str, Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Tuple = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } lowerCAmelCase :Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) lowerCAmelCase :Tuple = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Union[str, Any] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) lowerCAmelCase :Dict = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' lowerCAmelCase :Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' lowerCAmelCase :int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' lowerCAmelCase :Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' lowerCAmelCase :Any = '''''' lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' lowerCAmelCase :List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ): """simple docstring""" assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): __magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ): """simple docstring""" with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( 
lowerCAmelCase : Tuple ): """simple docstring""" ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : str = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): __magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) __magic_name__ : Any = expected_error.format(path=lowerCAmelCase ) with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ): ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md' with open(lowerCAmelCase , 'w+' ) as readme_file: readme_file.write(lowerCAmelCase ) ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
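# ----------------------------------------------------------------------------
# Illustrative direct use of the validator exercised by the tests above.
# A minimal sketch, assuming the `ReadMe.from_string(readme_text, structure)`
# signature that the parametrized tests rely on; the structure below is a
# trimmed variant of the fixture at the top of this file.

illustrative_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X"
  allow_empty: false
  allow_empty_text: true
  subsections: null
"""
)
illustrative_readme = "---\nlanguage:\n- en\n---\n# Dataset Card for X\nSome text here.\n"
readme = ReadMe.from_string(illustrative_readme, illustrative_structure)
readme.validate()  # raises if the card does not match the expected structure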
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
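# ----------------------------------------------------------------------------
# Illustrative sketch of the lazy-import idea used above: `_LazyModule`
# replaces the module object in `sys.modules`, so submodules are only imported
# when one of their attributes is first requested. The class below is a
# simplified stand-in (names are illustrative, not the real `_LazyModule` API):

import importlib


class LazyNamespaceSketch:
    """Resolve attributes to objects in submodules on first access."""

    def __init__(self, package, import_structure):
        self._package = package
        # Invert {submodule: [public names]} into {public name: submodule}.
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails.
        try:
            module_name = self._name_to_module[name]
        except KeyError:
            raise AttributeError(name) from None
        module = importlib.import_module(f".{module_name}", self._package)
        value = getattr(module, name)
        setattr(self, name, value)  # cache, so later lookups skip __getattr__
        return value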
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int: __magic_name__ : str = parent __magic_name__ : List[Any] = 13 __magic_name__ : Union[str, Any] = 7 __magic_name__ : Tuple = True __magic_name__ : Dict = True __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = True __magic_name__ : int = 99 __magic_name__ : List[str] = 384 __magic_name__ : Optional[int] = 2 __magic_name__ : List[Any] = 4 __magic_name__ : int = 37 __magic_name__ : Union[str, Any] = 'gelu' __magic_name__ : Optional[int] = 0.1 __magic_name__ : str = 0.1 __magic_name__ : Optional[Any] = 512 __magic_name__ : Any = 16 __magic_name__ : Union[str, Any] = 2 __magic_name__ : Any = 0.02 __magic_name__ : List[str] = 3 __magic_name__ : Tuple = 4 __magic_name__ : List[Any] = 128 __magic_name__ : Optional[Any] = 2 __magic_name__ : List[str] = 9 __magic_name__ : str = 1 __magic_name__ : List[str] = None def __lowerCAmelCase ( self : List[str] ) -> List[str]: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : List[str] = None if self.use_token_type_ids: __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None __magic_name__ : int = None if self.use_labels: __magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A 
: Tuple , _A : int , _A : Union[str, Any] ) -> Any: __magic_name__ : Dict = TFConvBertModel(config=_A ) __magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __magic_name__ : Any = [input_ids, input_mask] __magic_name__ : Tuple = model(_A ) __magic_name__ : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]: __magic_name__ : Dict = TFConvBertForMaskedLM(config=_A ) __magic_name__ : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple: __magic_name__ : Any = self.num_labels __magic_name__ : str = TFConvBertForSequenceClassification(config=_A ) __magic_name__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[Any] = self.num_choices __magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) __magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]: __magic_name__ : List[Any] = self.num_labels __magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) __magic_name__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int: __magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A ) __magic_name__ : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __magic_name__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[str] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( 
__magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : str = config_and_inputs __magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : List[str] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : Tuple = False A_ : Any = False A_ : List[Any] = False def __lowerCAmelCase ( self : List[Any] ) -> int: __magic_name__ : Optional[Any] = TFConvBertModelTester(self ) __magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : str ) -> Dict: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : int ) -> Any: __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True __magic_name__ : Any = True if hasattr(_A , 'use_cache' ): __magic_name__ : List[Any] = True __magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A ) for model_class in self.all_model_classes: __magic_name__ : List[str] = self._prepare_for_class(_A , _A ) __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Tuple = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) __magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' ) __magic_name__ : Optional[int] = tf.keras.models.load_model(_A ) __magic_name__ : Optional[Any] = model(_A ) if self.is_encoder_decoder: __magic_name__ : Optional[int] = outputs['encoder_hidden_states'] __magic_name__ : Tuple = outputs['encoder_attentions'] 
else: __magic_name__ : Union[str, Any] = outputs['hidden_states'] __magic_name__ : Optional[Any] = outputs['attentions'] self.assertEqual(len(_A ) , _A ) __magic_name__ : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ) , _A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_A ) def __lowerCAmelCase ( self : List[str] ) -> Any: __magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : str = True __magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A ) __magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A ) def check_decoder_attentions_output(_A : List[Any] ): __magic_name__ : Tuple = len(_A ) self.assertEqual(out_len % 2 , 0 ) __magic_name__ : Any = outputs.decoder_attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_A : int ): __magic_name__ : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __magic_name__ : Union[str, Any] = True __magic_name__ : Tuple = False __magic_name__ : List[str] = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) __magic_name__ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: __magic_name__ : Any = model_class(_A ) __magic_name__ : Any = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __magic_name__ : Optional[int] = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : str = True __magic_name__ : Optional[int] = model_class(_A ) __magic_name__ : str = model(self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) ) self.assertEqual(model.config.output_hidden_states , _A ) check_encoder_attentions_output(_A ) @require_tf class _lowerCamelCase ( 
unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : int ) -> int: __magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Tuple = model(_A )[0] __magic_name__ : str = [1, 6, 768] self.assertEqual(output.shape , _A ) __magic_name__ : Tuple = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
import os


def solution():
    """Return the maximum top-to-bottom path sum of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
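# ----------------------------------------------------------------------------
# Illustrative sanity check of the recurrence used in solution(): each cell
# accumulates the best path ending there, so the answer is the maximum of the
# last row. On the small Project Euler 18 triangle, the best path
# 3 + 7 + 4 + 9 sums to 23.


def max_triangle_path(triangle):
    # Same top-down DP as solution(), but on an in-memory triangle.
    # Note: mutates its argument.
    for i in range(1, len(triangle)):
        for j in range(len(triangle[i])):
            above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
            above_left = triangle[i - 1][j - 1] if j > 0 else 0
            triangle[i][j] += max(above_left, above_right)
    return max(triangle[-1])


assert max_triangle_path([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23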
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowerCAmelCase :Dict = pytest.mark.integration @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_A ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : List[str] ) -> Tuple: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() __magic_name__ : Union[str, Any] = dset.map( lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A ) __magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def __lowerCAmelCase ( self : Any ) -> str: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Tuple ) -> int: import faiss __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: from elasticsearch import Elasticsearch __magic_name__ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : int = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __magic_name__ : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_A ) __magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> List[Any]: import faiss __magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __magic_name__ : str = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Optional[int] = 1 __magic_name__ , __magic_name__ : str = index.search(_A ) self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __magic_name__ , __magic_name__ : str = index.search_batch(_A ) self.assertRaises(_A , index.search_batch , queries[0] ) __magic_name__ : List[Any] = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _A ) def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: import faiss __magic_name__ : str = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __magic_name__ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_A ): __magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: import faiss __magic_name__ : Any = faiss.IndexFlat(5 ) __magic_name__ : Optional[Any] = 
FaissIndex(custom_index=_A ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : Dict ) -> Tuple: import faiss __magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file: index.save(tmp_file.name ) __magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __magic_name__ : Dict = np.zeros(5 , dtype=np.floataa ) __magic_name__ : Tuple = 1 __magic_name__ , __magic_name__ : Optional[Any] = index.search(_A ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" import faiss __magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __magic_name__ : Dict = 'index.faiss' __magic_name__ : Optional[Any] = f'mock://{index_name}' index.save(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options ) __magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __magic_name__ : List[str] = 1 __magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Dict: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __magic_name__ : Any = Elasticsearch() __magic_name__ : Union[str, Any] = {'acknowledged': True} __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __magic_name__ : str = 'foo' __magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __magic_name__ : str = 'foo' __magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A ) __magic_name__ : Tuple = [scores[0] for scores in total_scores] __magic_name__ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A ) # batched queries with timeout __magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar'] __magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __magic_name__ , 
__magic_name__ : Dict = index.search_batch(_A , request_timeout=30 ) __magic_name__ : Optional[int] = [scores[0] for scores in total_scores] __magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_A ) , 0 ) self.assertListEqual([1, 1, 1] , _A )
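# ----------------------------------------------------------------------------
# Illustrative end-to-end version of what the FAISS tests above exercise,
# using the public `datasets` API (requires the `faiss-cpu` package). A
# minimal sketch, not part of the test suite:


def example_faiss_roundtrip():
    ds = Dataset.from_dict(
        {
            "filename": [f"doc_{i}" for i in range(30)],
            "vecs": [i * np.ones(5, dtype=np.float32) for i in range(30)],
        }
    )
    ds.add_faiss_index(column="vecs")
    scores, examples = ds.get_nearest_examples("vecs", np.ones(5, dtype=np.float32), k=1)
    return scores, examples["filename"]  # nearest row under the index's metric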
"""Prim's minimum-spanning-tree algorithm, backed by an indexed min-heap."""

from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
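# ----------------------------------------------------------------------------
# Illustrative run of Prim's algorithm on a tiny weighted triangle graph. The
# minimum spanning tree keeps edges a-b (3) and a-c (5) and drops the heavier
# b-c edge, so each node's dist is the weight of its chosen tree edge:

if __name__ == "__main__":
    example_graph: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    example_graph.add_edge("a", "b", 3)
    example_graph.add_edge("b", "c", 10)
    example_graph.add_edge("c", "a", 5)

    dist, parent = prims_algo(example_graph)
    print(dist)    # {'a': 0, 'b': 3, 'c': 5}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'a'}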
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( lowerCAmelCase : Tuple ): """simple docstring""" __magic_name__ : List[Any] = filter(lambda lowerCAmelCase : p.requires_grad , model.parameters() ) __magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__) def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ): """simple docstring""" if metric == "rouge2": __magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": __magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": __magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": __magic_name__ : int = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) __magic_name__ : List[Any] = ModelCheckpoint( dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ): """simple docstring""" return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int: __magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None: logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) __magic_name__ : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results __magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": __magic_name__ : List[Any] = od / 'test_results.txt' __magic_name__ : Dict = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt' __magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , 'a+' ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __magic_name__ : Optional[Any] = metrics[key] if isinstance(_A , torch.Tensor ): __magic_name__ : Tuple = val.item() __magic_name__ : int = F'{key}: {val:.6f}\n' writer.write(_A ) if not save_generations: return if "preds" in metrics: __magic_name__ : str = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(_A ) @rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple: try: __magic_name__ : str = pl_module.model.model.num_parameters() except AttributeError: __magic_name__ : List[str] = pl_module.model.num_parameters() __magic_name__ : List[Any] = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , 'test' ) @rank_zero_only def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
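# ----------------------------------------------------------------------------
# Illustrative wiring of these callbacks into a PyTorch Lightning trainer.
# The identifiers in this file arrive mangled, so the names used below
# (Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback)
# are assumed restorations of the three definitions above, not guaranteed:
#
#     checkpoint = get_checkpoint_callback("output/", "rouge2")   # ModelCheckpoint on val_rouge2
#     early_stopping = get_early_stopping_callback("rouge2", 3)   # EarlyStopping, patience 3
#     trainer = pl.Trainer(
#         max_epochs=3,
#         callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stopping],
#     )
#     trainer.fit(model)  # `model` is the seq2seq LightningModule being trained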
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
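# ----------------------------------------------------------------------------
# Illustrative usage: the layer tokenizes inside the TF graph, so it can be
# exported as part of a SavedModel. A minimal sketch (requires `keras_nlp` and
# `tensorflow_text`, and assumes the standard "gpt2" checkpoint is available):

if __name__ == "__main__":
    tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
    outputs = tf_tokenizer(tf.constant(["hello world"]))
    print(outputs["input_ids"])       # token ids produced in-graph
    print(outputs["attention_mask"])  # all ones unless pad_token_id triggers padding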
def one_pence() -> int:
    # There is exactly one way to make any amount from 1p coins alone.
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Count the ways to make ``x`` pence from UK coins (1p up to £2)."""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
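# ----------------------------------------------------------------------------
# Illustrative cross-check: the chain of recursions above is a ways-to-make-
# change count in disguise. The standard iterative coin-change DP computes the
# same number and avoids deep recursion for large targets:


def count_ways(target, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


# The classic Project Euler 31 result for £2:
assert count_ways(200) == 73682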
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever A : List[Any] = logging.getLogger(__name__) class __A( lowercase__ ): def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case=None ) -> int: '''simple docstring''' super().__init__( _A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , ) __a = None def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually __a = self._infer_socket_ifname() # avoid clash with the NCCL port __a = str(distributed_port + 1 ) __a = dist.new_group(ranks=_A , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' return dist.get_rank(group=self.process_group ) == 0 def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=torch.floataa ) -> str: '''simple docstring''' __a = torch.empty(_A , dtype=_A ) dist.scatter(_A , src=0 , scatter_list=_A , group=self.process_group ) return target_tensor def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __a = next((addr for addr in addrs if addr.startswith('''e''' )) , _A ) return ifname def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple[np.ndarray, List[dict]]: '''simple docstring''' if not dist.is_initialized(): __a = self._main_retrieve(_A , _A ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_A ) # distributed training __a = dist.get_world_size(group=self.process_group ) # gather logic __a = None if self._is_main(): __a = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_A )] dist.gather(torch.tensor(_A ) , dst=0 , gather_list=_A , group=self.process_group ) # scatter logic __a = question_hidden_states.shape[0] __a = [] __a = [] if self._is_main(): assert len(_A ) == world_size __a = self._main_retrieve(torch.cat(_A ).numpy() , _A ) __a = torch.tensor(_A ), torch.tensor(_A ) __a = self._chunk_tensor(_A , _A ) __a = self._chunk_tensor(_A , _A ) __a = self._scattered(_A , [n_queries, n_docs] , target_type=torch.intaa ) __a = self._scattered(_A , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_A )
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Union[str, Any] = ["""flax""", """transformers"""] def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Dict = ["""flax""", """transformers"""] def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] ) class _lowerCamelCase ( metaclass=lowercase__ ): '''simple docstring''' A_ : Optional[int] = ["""flax""", """transformers"""] def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]: requires_backends(self , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict: requires_backends(cls , ['flax', 'transformers'] ) @classmethod def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]: requires_backends(cls , ['flax', 'transformers'] )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ = { '''configuration_blip_2''': [ '''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Blip2Config''', '''Blip2QFormerConfig''', '''Blip2VisionConfig''', ], '''processing_blip_2''': ['''Blip2Processor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Blip2Model''', '''Blip2QFormerModel''', '''Blip2PreTrainedModel''', '''Blip2ForConditionalGeneration''', '''Blip2VisionModel''', ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase :Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any: super().__init__(*_A , **_A ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]: __magic_name__ : Union[str, Any] = {} __magic_name__ : Optional[Any] = {} if prompt is not None: __magic_name__ : Union[str, Any] = prompt if generate_kwargs is not None: __magic_name__ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __magic_name__ : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) __magic_name__ : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int: return super().__call__(_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict: __magic_name__ : List[Any] = load_image(_A ) if prompt is not None: if not isinstance(_A , _A ): raise ValueError( F'Received an invalid text input, got - {type(_A )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) __magic_name__ : Any = self.model.config.model_type if model_type == "git": __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids __magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids __magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": __magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework ) __magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework ) model_inputs.update(_A ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __magic_name__ : int = None return model_inputs def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _A ) and all(x is None for x in model_inputs['input_ids'] ) ): __magic_name__ : str = None if generate_kwargs is None: __magic_name__ : Optional[int] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) __magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A ) return model_outputs def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]: __magic_name__ : Optional[Any] = [] for output_ids in model_outputs: __magic_name__ : Union[str, Any] = { 'generated_text': self.tokenizer.decode( _A , skip_special_tokens=_A , ) } records.append(_A ) return records
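# ----------------------------------------------------------------------------
# Illustrative usage of this pipeline through the public factory. The
# checkpoint name and image path below are placeholders, not fixed by this
# file; any image-to-text checkpoint works:
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     result = captioner("path/or/url/to/image.png")
#     print(result)  # e.g. [{"generated_text": "two cats sleeping on a couch"}]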
"""simple docstring""" import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def _A (__a , __a , __a , __a , __a = None , __a = None , __a = None , ) -> Optional[Any]: """simple docstring""" if config_name_or_path is None: SCREAMING_SNAKE_CASE_ : Any = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base' if generator_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE_ : Optional[int] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE_ : Dict = question_encoder_name_or_path SCREAMING_SNAKE_CASE_ : Optional[int] = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration # Save model. SCREAMING_SNAKE_CASE_ : Optional[int] = RagConfig.from_pretrained(__a ) SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = AutoConfig.from_pretrained(__a ) SCREAMING_SNAKE_CASE_ : List[str] = gen_config SCREAMING_SNAKE_CASE_ : Optional[Any] = question_encoder_config SCREAMING_SNAKE_CASE_ : Tuple = model_class.from_pretrained_question_encoder_generator( __a , __a , config=__a ) rag_model.save_pretrained(__a ) # Sanity check. model_class.from_pretrained(__a ) # Save tokenizers. SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(__a ) gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(__a ) question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' ) if __name__ == "__main__": UpperCAmelCase_ : Any = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) UpperCAmelCase_ : Any = parser.parse_args() UpperCAmelCase_ : Any = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowerCAmelCase :Dict = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') lowerCAmelCase :str = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowerCAmelCase :Any = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys()) lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class _lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) __magic_name__ : List[str] = 0 __magic_name__ : Union[str, Any] = Path(self.hparams.output_dir ) __magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __magic_name__ : Optional[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: __magic_name__ : PretrainedConfig = config __magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: __magic_name__ : List[Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: __magic_name__ : PreTrainedTokenizer = tokenizer __magic_name__ : Optional[int] = MODEL_MODES[mode] if model is None: __magic_name__ : Tuple = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , ) else: __magic_name__ : str = model def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple: __magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: __magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] __magic_name__ : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : Optional[Any] = self.model __magic_name__ : int = ['bias', 'LayerNorm.weight'] __magic_name__ : Dict = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __magic_name__ : str = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: __magic_name__ : Tuple = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __magic_name__ : List[str] = optimizer __magic_name__ : int = self.get_lr_scheduler() return [optimizer], [scheduler] def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]: return self.validation_step(_A , _A ) def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any: return self.validation_end(_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str: if stage == "test": __magic_name__ : Any = len(self.test_dataloader().dataset ) else: __magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) __magic_name__ : int = len(self.train_dataloader().dataset ) def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]: raise NotImplementedError('You must implement this for your task' ) def __lowerCAmelCase ( self : int ) -> List[str]: return self.train_loader def __lowerCAmelCase ( self : Tuple ) -> int: return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str: return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None: __magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' ) __magic_name__ : List[Any] = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) @staticmethod def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple: 
parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class _lowerCamelCase ( pl.Callback ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class CheckParamCallback(pl.Callback):
    '''simple docstring'''

    # Check whether newly added model parameters are differentiable.
    def on_after_backward(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    '''simple docstring'''

    def on_batch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))


def add_generic_args(parser, root_dir) -> None:
    """simple docstring"""
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """simple docstring"""
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
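# --- Subclassing sketch (not part of the original file; `MyTaskModule`,
# `load_my_dataset`, and the argparse wiring are illustrative assumptions) ---
# The LightningModule at the top of this file leaves `get_dataloader`
# abstract; a task module provides it and is then handed to `generic_train`:
#
#   class MyTaskModule(BaseTransformer):
#       mode = "sequence-classification"
#
#       def get_dataloader(self, type_path, batch_size, shuffle=False):
#           dataset = load_my_dataset(self.hparams.data_dir, type_path)
#           return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskModule(args), args)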
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
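# --- Behavior note (not part of the original file) ---
# At runtime (the non-TYPE_CHECKING branch), `_LazyModule` replaces this
# module in `sys.modules`, so importing the package is cheap and the heavy
# submodules load only on first attribute access:
#
#   from transformers.models import layoutxlm   # nothing heavy imported yet
#   layoutxlm.LayoutXLMProcessor                 # now processing_layoutxlm is imported
#
# Static type checkers instead follow the explicit imports in the
# TYPE_CHECKING branch above.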
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = (DDPMScheduler,) def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str: __magic_name__ : str = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_A ) return config def __lowerCAmelCase ( self : str ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> str: self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Dict = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __lowerCAmelCase ( self : Tuple ) -> int: __magic_name__ : Tuple = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : str = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Union[str, Any] = self.dummy_model() __magic_name__ : List[Any] = self.dummy_sample_deter __magic_name__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : Tuple = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : Dict = pred_prev_sample __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) ) __magic_name__ : Dict = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ) -> Optional[int]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' ) __magic_name__ : Any = scheduler_class(**_A ) __magic_name__ : Any = len(_A ) __magic_name__ : Dict = self.dummy_model() __magic_name__ : str = self.dummy_sample_deter __magic_name__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual __magic_name__ : List[Any] = model(_A , _A ) # 2. predict previous mean of sample x_t-1 __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __magic_name__ : List[Any] = pred_prev_sample __magic_name__ : int = torch.sum(torch.abs(_A ) ) __magic_name__ : Any = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ) -> str: __magic_name__ : Dict = self.scheduler_classes[0] __magic_name__ : Any = self.get_scheduler_config() __magic_name__ : Optional[Any] = scheduler_class(**_A ) __magic_name__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) __magic_name__ : List[str] = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: __magic_name__ : Optional[int] = -1 else: __magic_name__ : List[Any] = timesteps[i + 1] __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A ) __magic_name__ : Any = prev_t.item() self.assertEqual(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> str: __magic_name__ : str = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_A ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Union[str, Any] = self.scheduler_classes[0] __magic_name__ : Union[str, Any] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Optional[int] = [100, 87, 50, 1, 0] __magic_name__ : Tuple = len(_A ) with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.scheduler_classes[0] __magic_name__ : List[str] = self.get_scheduler_config() __magic_name__ : Union[str, Any] = scheduler_class(**_A ) __magic_name__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=_A )
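# --- Worked check (not part of the original test file; a from-scratch sketch
# of the "fixed_small" variance that the assertions above pin down) ---
# With the default linear schedule (beta from 0.0001 to 0.02 over 1000 steps),
# the posterior variance is (1 - abar_{t-1}) / (1 - abar_t) * beta_t, which is
# ~0 at t = 0 and approaches beta_t (~0.02) at t = 999. The scheduler itself
# additionally clamps this to at least 1e-20, which the sketch omits.
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)


def posterior_variance(t: int) -> float:
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * betas[t]).item()


print(posterior_variance(0))    # ~0.0
print(posterior_variance(487))  # ~0.00979, matching the assertion above
print(posterior_variance(999))  # ~0.02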
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class lowercase_ : def __init__( self , __UpperCamelCase , __UpperCamelCase=1_3 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=9_9 , __UpperCamelCase=3_2 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=3_7 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_1_2 , __UpperCamelCase=1_6 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ): """simple docstring""" UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_token_type_ids UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_act UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = type_vocab_size UpperCamelCase_ = type_sequence_label_size UpperCamelCase_ = initializer_range UpperCamelCase_ = num_labels UpperCamelCase_ = num_choices UpperCamelCase_ = scope UpperCamelCase_ = self.vocab_size - 1 def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = None if self.use_token_type_ids: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase_ = None UpperCamelCase_ = None UpperCamelCase_ = None if self.use_labels: UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase_ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) UpperCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , *__UpperCamelCase ): """simple docstring""" UpperCamelCase_ = OpenAIGPTModel(config=_A ) model.to(_A ) model.eval() UpperCamelCase_ = model(_A , token_type_ids=_A , head_mask=_A ) UpperCamelCase_ = model(_A , token_type_ids=_A ) UpperCamelCase_ = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , *__UpperCamelCase ): """simple docstring""" UpperCamelCase_ = 
OpenAIGPTLMHeadModel(_A ) model.to(_A ) model.eval() UpperCamelCase_ = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , *__UpperCamelCase ): """simple docstring""" UpperCamelCase_ = OpenAIGPTDoubleHeadsModel(_A ) model.to(_A ) model.eval() UpperCamelCase_ = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , *__UpperCamelCase ): """simple docstring""" UpperCamelCase_ = self.num_labels UpperCamelCase_ = OpenAIGPTForSequenceClassification(_A ) model.to(_A ) model.eval() UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase_ = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.prepare_config_and_inputs() ( UpperCamelCase_ ) = config_and_inputs UpperCamelCase_ = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class lowercase_ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): A__ : Any = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ : str = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ : Optional[Any] = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ): """simple docstring""" UpperCamelCase_ = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": UpperCamelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_A , ) UpperCamelCase_ = inputs_dict['labels'] UpperCamelCase_ = inputs_dict['labels'] UpperCamelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_A , ) UpperCamelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = OpenAIGPTModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_A , n_embd=3_7 ) def lowerCamelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_A ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_A ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_A ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_A ) @slow def lowerCamelCase_ ( self ): """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase_ = OpenAIGPTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class lowercase_ ( unittest.TestCase ): @slow def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_A ) UpperCamelCase_ = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=_A ) # the president is UpperCamelCase_ = [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the UpperCamelCase_ = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].tolist() , _A )
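# --- Helper sketch (not part of the original file) ---
# `ids_tensor`, imported at the top, is what the tester uses to fabricate
# dummy inputs. A minimal re-implementation under the assumption that it
# simply draws random token ids in [0, vocab_size):
import torch


def ids_tensor_sketch(shape, vocab_size):
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)


dummy_input_ids = ids_tensor_sketch([13, 7], 99)  # (batch_size, seq_length) as in the tester
print(dummy_input_ids.shape)  # torch.Size([13, 7])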
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int=13 , lowerCAmelCase__ : Any=30 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Any=3 , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Union[str, Any]=32 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Optional[int]=37 , lowerCAmelCase__ : Optional[int]="gelu" , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : str=10 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : Any=None , ) -> Tuple: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = image_size _UpperCamelCase = patch_size _UpperCamelCase = num_channels _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase = (image_size // patch_size) ** 2 _UpperCamelCase = num_patches + 1 def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = self.get_config() return config, pixel_values, labels def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = TFViTModel(config=_A ) _UpperCamelCase = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
_UpperCamelCase = self.image_size // 2 _UpperCamelCase = pixel_values[:, :, :image_size, :image_size] _UpperCamelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) _UpperCamelCase = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def snake_case__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int ) -> int: '''simple docstring''' _UpperCamelCase = self.type_sequence_label_size _UpperCamelCase = TFViTForImageClassification(_A ) _UpperCamelCase = model(_A , labels=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. _UpperCamelCase = self.image_size // 2 _UpperCamelCase = pixel_values[:, :, :image_size, :image_size] _UpperCamelCase = model(_A , interpolate_pos_encoding=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCamelCase = 1 _UpperCamelCase = TFViTForImageClassification(_A ) _UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case__ ( self : Optional[int] ) -> Any: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase = config_and_inputs _UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __lowerCAmelCase ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" _snake_case : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () _snake_case : Any = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) _snake_case : Union[str, Any] = False _snake_case : Optional[Any] = False _snake_case : str = False def snake_case__ ( self : Optional[Any] ) -> int: '''simple docstring''' _UpperCamelCase = TFViTModelTester(self ) _UpperCamelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def snake_case__ ( self : Optional[int] ) -> str: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def snake_case__ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def snake_case__ ( self : List[str] ) -> Any: '''simple docstring''' pass def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def snake_case__ ( self : Dict ) -> int: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(_A ) _UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , _A ) def snake_case__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def snake_case__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_A ) def a__ ( ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self : int ) -> Dict: '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def snake_case__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=_A , return_tensors='''tf''' ) # forward pass _UpperCamelCase = model(**_A ) # verify the logits _UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _A ) _UpperCamelCase = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1e-4 )
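# --- Worked example (not part of the original test file) ---
# The sequence length the tester precomputes: a ViT cuts the image into
# (image_size // patch_size) ** 2 non-overlapping patches and prepends a
# single [CLS] token.
image_size, patch_size = 30, 2               # the tester defaults above
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1
print(num_patches, seq_length)               # 225 226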
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict: __magic_name__ : Union[str, Any] = parent __magic_name__ : Any = batch_size __magic_name__ : Optional[int] = seq_length __magic_name__ : List[str] = is_training __magic_name__ : Optional[Any] = use_input_mask __magic_name__ : Dict = use_token_type_ids __magic_name__ : str = use_labels __magic_name__ : int = vocab_size __magic_name__ : List[Any] = hidden_size __magic_name__ : Dict = num_hidden_layers __magic_name__ : Dict = num_attention_heads __magic_name__ : Tuple = intermediate_size __magic_name__ : Any = hidden_act __magic_name__ : Union[str, Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : List[Any] = max_position_embeddings __magic_name__ : Any = type_vocab_size __magic_name__ : Union[str, Any] = type_sequence_label_size __magic_name__ : Union[str, Any] = initializer_range __magic_name__ : str = num_labels __magic_name__ : Tuple = num_choices __magic_name__ : Any = relative_attention __magic_name__ : str = position_biased_input __magic_name__ : str = pos_att_type __magic_name__ : Union[str, Any] = scope def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : List[Any] = None if self.use_input_mask: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ : int = None if self.use_token_type_ids: __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : List[str] = None __magic_name__ : Tuple = None __magic_name__ : Union[str, Any] = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self : str ) -> Optional[Any]: __magic_name__ : List[Any] = self.get_config() __magic_name__ : Union[str, Any] = 300 return config def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]: __magic_name__ : Dict = DebertaModel(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0] __magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0] __magic_name__ : List[str] = model(_A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict: __magic_name__ : List[str] = DebertaForMaskedLM(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]: __magic_name__ : Optional[int] = self.num_labels __magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A ) model.to(_A ) model.eval() __magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_A ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]: __magic_name__ : str = self.num_labels __magic_name__ : int = DebertaForTokenClassification(config=_A ) model.to(_A ) model.eval() __magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]: __magic_name__ : int = DebertaForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __magic_name__ : Optional[int] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) 
, ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) A_ : Tuple = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) A_ : Union[str, Any] = True A_ : Any = False A_ : Dict = False A_ : str = False A_ : Dict = False def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: __magic_name__ : List[str] = DebertaModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_A ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_A ) def __lowerCAmelCase ( self : Any ) -> str: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_A ) def __lowerCAmelCase ( self : Any ) -> Tuple: __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_A ) def __lowerCAmelCase ( self : str ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_A ) @slow def __lowerCAmelCase ( self : str ) -> Optional[Any]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : int = DebertaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: pass @slow def __lowerCAmelCase ( self : Dict ) -> Tuple: __magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' ) __magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0] # compare the actual values for a slice. __magic_name__ : Tuple = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class _snake_case ( lowercase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = XLMProphetNetTokenizer SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = True def SCREAMING_SNAKE_CASE__ ( self ): super().setUp() # We have a SentencePiece fixture for testing a :Optional[Any] = XLMProphetNetTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = '[PAD]' a :Optional[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''[PAD]''' ) self.assertEqual(vocab_keys[1] , '''[CLS]''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(_A ) , 1012 ) def SCREAMING_SNAKE_CASE__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = XLMProphetNetTokenizer(_A , keep_accents=_A ) a :List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) a :int = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) a :Optional[int] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''[UNK]''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''[UNK]''', '''.''', ] , ) @cached_property def SCREAMING_SNAKE_CASE__ ( self ): return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = 'Hello World!' 
        input_ids = [35389, 6672, 49, 2]
        self.assertListEqual(input_ids, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_input_ids = [
            [1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2],
            [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2] + [0] * 68,
            [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2] + [0] * 91,
        ]
        # fmt: on
        # sequences are padded to length 105; the attention mask is 1 over real
        # tokens and 0 over padding
        expected_encoding = {
            "input_ids": expected_input_ids,
            "attention_mask": [
                [1] * 105,
                [1] * 37 + [0] * 68,
                [1] * 14 + [0] * 91,
            ],
        }
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
94
"""Count the islands (8-connected components of 1s) in a boolean matrix."""


class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
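# A minimal usage sketch (hypothetical grid, not part of the original file):
# three 8-connected islands in a 4x5 grid.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [0, 0, 0, 1, 1],
        [1, 0, 0, 0, 0],
    ]
    g = Graph(4, 5, grid)
    print(g.count_islands())  # -> 3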
331
0
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
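# Usage sketch (not part of the original file): drive the scheduler above with a
# stand-in denoiser that predicts zeros, to show how state is threaded through
# each call. The denoiser and shapes are hypothetical, and the key handling
# assumes the JAX version these flax schedulers originally targeted.
def _dummy_denoiser(x, sigma):
    # a real pipeline would call a trained model here
    return jnp.zeros_like(x)


def run_karras_ve_sketch(num_inference_steps: int = 10):
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps)
    key = random.PRNGKey(0)
    sample = random.normal(key, (1, 8, 8, 3)) * scheduler.config.sigma_max
    for t in state.timesteps:
        sigma = state.schedule[t]
        sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
        # stochastic churn step, then one Euler step toward sigma_prev
        sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
        model_output = _dummy_denoiser(sample_hat, sigma_hat)
        sample = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
    return sample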
83
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
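# How the lazy pattern above behaves in practice (illustrative, not from the
# original file): nothing under this package is imported at package-load time.
# Only when an attribute is first touched, e.g.
#
#   from transformers.models.layoutxlm import LayoutXLMProcessor
#
# does _LazyModule import processing_layoutxlm and resolve the name, so optional
# backends (sentencepiece, tokenizers) are only required if actually used.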
331
0
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
156
"""
Find the area of the rectangular grid whose number of contained rectangles is
closest to the target (Project Euler problem 85).
"""

from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
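# Worked example (from the problem statement, added here for clarity): an
# a x b grid contains T(a) * T(b) rectangles, where T(n) = n(n+1)/2 is the
# n-th triangle number; a 3x2 grid therefore contains T(3) * T(2) = 6 * 3 = 18
# rectangles. The search above scans index pairs (a, b) for the product
# T(a) * T(b) closest to the target and reports the grid area a * b.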
331
0
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
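# Example invocations (illustrative): each registered subcommand is dispatched
# via args.func, and any unrecognized "--key value" pairs are turned into extra
# keyword arguments by parse_unknown_args. The option name in the second line
# is hypothetical.
#
#   datasets-cli env
#   datasets-cli test ./my_dataset_dir --my_custom_option value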
158
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
331
0