Dataset schema (column name, dtype, observed value range):

    code                      string   length 82 - 54.1k
    code_codestyle            int64    0 - 699
    style_context             string   length 111 - 35.6k
    style_context_codestyle   int64    0 - 699
    label                     int64    0 - 1
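Each record below pairs an obfuscated Python snippet (code) with a second snippet (style_context), two integer style identifiers, and a binary label; the cells appear in column order code, code_codestyle, style_context, style_context_codestyle, label. A minimal loading sketch, assuming the dump corresponds to a dataset hosted on the HuggingFace Hub — the repo id "user/code-style-pairs" is a placeholder, not the dataset's real name:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical repo id
row = ds[0]
print(len(row["code"]))                # string, length 82 - 54.1k
print(row["code_codestyle"])           # int64 in [0, 699]
print(len(row["style_context"]))       # string, length 111 - 35.6k
print(row["style_context_codestyle"])  # int64 in [0, 699]
print(row["label"])                    # int64 in {0, 1}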
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory returning a ConvertCommand built from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
493
"""Loader for the custom CPU/CUDA kernels used by Deformable DETR's multi-scale deformable attention."""
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
50
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
513
"""Greedy activity selection: prints a maximum-size set of non-overlapping activities,
assuming the activities are already sorted by finish time."""


def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
50
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
405
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        # The warning category below is reconstructed; FutureWarning is the
        # usual category for this deprecation pattern in Transformers.
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
50
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
243
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
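# A tiny usage sketch (an assumption, not part of the module above): the
# hashing helper drops comment-only and blank lines before hashing, so these
# two sources hash identically.
sample_a = ["x = 1", "# comment-only lines are removed", "", "print(x)"]
sample_b = ["x = 1", "print(x)"]
assert _hash_python_lines(sample_a) == _hash_python_lines(sample_b)
print(_hash_python_lines(sample_b))  # deterministic sha256 hex digest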
50
0
'''simple docstring''' import re import string import numpy as np import datasets _lowercase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' _lowercase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' _lowercase = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def lowercase__ ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def lowercase__ ( self : Tuple , __lowerCAmelCase : Dict , 
__lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=False , __lowerCAmelCase : int=False , __lowerCAmelCase : List[str]=False , ): if regexes_to_ignore is not None: for s in regexes_to_ignore: __snake_case = np.array([re.sub(_lowerCAmelCase , '' , _lowerCAmelCase ) for x in predictions] ) __snake_case = np.array([re.sub(_lowerCAmelCase , '' , _lowerCAmelCase ) for x in references] ) else: __snake_case = np.asarray(_lowerCAmelCase ) __snake_case = np.asarray(_lowerCAmelCase ) if ignore_case: __snake_case = np.char.lower(_lowerCAmelCase ) __snake_case = np.char.lower(_lowerCAmelCase ) if ignore_punctuation: __snake_case = string.punctuation.maketrans('' , '' , string.punctuation ) __snake_case = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) __snake_case = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) if ignore_numbers: __snake_case = string.digits.maketrans('' , '' , string.digits ) __snake_case = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) __snake_case = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) __snake_case = predictions == references return {"exact_match": np.mean(_lowerCAmelCase ) * 1_0_0}
356
"""Strand sort: repeatedly pulls an increasing (or decreasing) strand out of the
input and merges it into the solution list."""
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
50
0
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( lowerCAmelCase : Any ): """simple docstring""" if isinstance(__lowerCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class _lowerCamelCase : '''simple docstring''' def __lowerCAmelCase ( self : Dict , _A : int , _A : Optional[int] ) -> str: pass def __lowerCAmelCase ( self : List[Any] ) -> List[str]: pass def __lowerCAmelCase ( self : Dict ) -> List[str]: pass def __lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : Tuple , _A : List[str] ) -> List[Any]: __magic_name__ : Any = np.abs((a - b) ).max() self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' ) def __lowerCAmelCase ( self : Dict , _A : Any , _A : List[Any] , _A : int , _A : List[Any] , _A : Union[str, Any]=None , **_A : Optional[Any] ) -> int: __magic_name__ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase ) __magic_name__ : Union[str, Any] = FlaxVisionTextDualEncoderModel(_lowerCAmelCase ) __magic_name__ : Any = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Dict , _A : int , _A : List[Any] , _A : Dict=None , **_A : List[str] ) -> Tuple: __magic_name__ , __magic_name__ : Dict = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __magic_name__ : List[Any] = {'vision_model': vision_model, 'text_model': text_model} __magic_name__ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase ) __magic_name__ : int = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Any , _A : List[str]=None , **_A : Optional[int] ) -> Any: __magic_name__ , __magic_name__ : Union[str, Any] = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __magic_name__ : str = {'vision_model': vision_model, 'text_model': text_model} __magic_name__ : 
List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase ) __magic_name__ : int = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) __magic_name__ : Tuple = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ) __magic_name__ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) __magic_name__ : Optional[Any] = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) __magic_name__ : List[str] = after_output[0] __magic_name__ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1E-3 ) def __lowerCAmelCase ( self : Tuple , _A : str , _A : Any , _A : int , _A : str , _A : int=None , **_A : List[Any] ) -> Union[str, Any]: __magic_name__ , __magic_name__ : Tuple = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __magic_name__ : int = {'vision_model': vision_model, 'text_model': text_model} __magic_name__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase ) __magic_name__ : int = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) __magic_name__ : Dict = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __magic_name__ : Union[str, Any] = to_atuple(vision_model.config.image_size ) __magic_name__ : Union[str, Any] = to_atuple(vision_model.config.patch_size ) __magic_name__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __magic_name__ : str = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __magic_name__ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __lowerCAmelCase ( self : Any , _A : str , _A : Optional[Any] , _A : Optional[int] ) -> List[str]: pt_model.to(_lowerCAmelCase ) pt_model.eval() # prepare inputs __magic_name__ : str = inputs_dict __magic_name__ : int = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): __magic_name__ : Union[str, Any] = pt_model(**_lowerCAmelCase ).to_tuple() __magic_name__ : Union[str, Any] = fx_model(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(_lowerCAmelCase , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_lowerCAmelCase ) __magic_name__ : str = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase , from_pt=_lowerCAmelCase ) __magic_name__ : Union[str, Any] = fx_model_loaded(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(_lowerCAmelCase , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: 
fx_model.save_pretrained(_lowerCAmelCase ) __magic_name__ : Tuple = VisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase , from_flax=_lowerCAmelCase ) pt_model_loaded.to(_lowerCAmelCase ) pt_model_loaded.eval() with torch.no_grad(): __magic_name__ : Optional[int] = pt_model_loaded(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(_lowerCAmelCase , pt_output_loaded.numpy() , 4E-2 ) def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : Tuple , _A : Tuple ) -> Dict: __magic_name__ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase ) __magic_name__ : List[Any] = VisionTextDualEncoderModel(_lowerCAmelCase ) __magic_name__ : str = FlaxVisionTextDualEncoderModel(_lowerCAmelCase ) __magic_name__ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowerCAmelCase ) __magic_name__ : Dict = fx_state self.check_pt_flax_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self : str , _A : List[Any] , _A : Dict , _A : Tuple ) -> Any: __magic_name__ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase ) __magic_name__ : Tuple = VisionTextDualEncoderModel(_lowerCAmelCase ) __magic_name__ : List[str] = FlaxVisionTextDualEncoderModel(_lowerCAmelCase ) __magic_name__ : List[str] = load_flax_weights_in_pytorch_model(_lowerCAmelCase , fx_model.params ) self.check_pt_flax_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Any: __magic_name__ : List[str] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_lowerCAmelCase ) def __lowerCAmelCase ( self : Optional[int] ) -> int: __magic_name__ : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase ) def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: __magic_name__ : Tuple = self.prepare_config_and_inputs() self.check_save_load(**_lowerCAmelCase ) def __lowerCAmelCase ( self : int ) -> List[str]: __magic_name__ : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_lowerCAmelCase ) @is_pt_flax_cross_test def __lowerCAmelCase ( self : int ) -> Optional[int]: __magic_name__ : Optional[Any] = self.prepare_config_and_inputs() __magic_name__ : Optional[Any] = config_inputs_dict.pop('vision_config' ) __magic_name__ : str = config_inputs_dict.pop('text_config' ) __magic_name__ : int = config_inputs_dict self.check_equivalence_pt_to_flax(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) self.check_equivalence_flax_to_pt(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) @slow def __lowerCAmelCase ( self : int ) -> List[str]: __magic_name__ , __magic_name__ : Union[str, Any] = self.get_pretrained_model_and_inputs() __magic_name__ : List[Any] = model_a(**_lowerCAmelCase ) __magic_name__ : List[str] = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_lowerCAmelCase ) __magic_name__ : str = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) __magic_name__ : Optional[int] = model_a(**_lowerCAmelCase ) __magic_name__ : Dict = after_outputs[0] __magic_name__ : List[str] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1E-5 ) 
@require_flax class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) -> Tuple: __magic_name__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_lowerCAmelCase , text_from_pt=_lowerCAmelCase , ) __magic_name__ : Union[str, Any] = 13 __magic_name__ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __magic_name__ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __magic_name__ : List[str] = random_attention_mask([batch_size, 4] ) __magic_name__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : Dict ) -> List[str]: __magic_name__ : Dict = FlaxViTModel(_lowerCAmelCase ) __magic_name__ : List[str] = FlaxBertModel(_lowerCAmelCase ) return vision_model, text_model def __lowerCAmelCase ( self : str ) -> Any: __magic_name__ : Optional[int] = FlaxViTModelTester(self ) __magic_name__ : Any = FlaxBertModelTester(self ) __magic_name__ : int = vit_model_tester.prepare_config_and_inputs() __magic_name__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() __magic_name__ , __magic_name__ : Dict = vision_config_and_inputs __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[str] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class _lowerCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Any ) -> List[str]: __magic_name__ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_lowerCAmelCase , text_from_pt=_lowerCAmelCase , ) __magic_name__ : Dict = 13 __magic_name__ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __magic_name__ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __magic_name__ : Optional[int] = random_attention_mask([batch_size, 4] ) __magic_name__ : Dict = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __lowerCAmelCase ( self : Union[str, Any] , _A : Optional[Any] , _A : List[Any] ) -> int: __magic_name__ : Union[str, Any] = FlaxCLIPVisionModel(_lowerCAmelCase ) __magic_name__ : str = FlaxBertModel(_lowerCAmelCase ) return vision_model, text_model def __lowerCAmelCase ( self : Any ) -> List[Any]: __magic_name__ : int = FlaxCLIPVisionModelTester(self ) __magic_name__ : Dict = FlaxBertModelTester(self ) __magic_name__ : Optional[int] = clip_model_tester.prepare_config_and_inputs() __magic_name__ : int = bert_model_tester.prepare_config_and_inputs() __magic_name__ , __magic_name__ : Any = vision_config_and_inputs __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, 
"vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self : Optional[Any] ) -> Any: __magic_name__ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 ) __magic_name__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) __magic_name__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) __magic_name__ : int = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' ) __magic_name__ : Optional[int] = model(**_lowerCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __magic_name__ : int = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 ) )
561
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset shipped with scikit-learn
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
50
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" __SCREAMING_SNAKE_CASE = '''sew''' def __init__( self , lowerCamelCase=32 , lowerCamelCase=7_68 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=30_72 , lowerCamelCase=2 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase=False , lowerCamelCase=1_28 , lowerCamelCase=16 , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase="mean" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=2_56 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , **lowerCamelCase , ) -> Any: '''simple docstring''' super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase ) UpperCamelCase : Optional[Any] = hidden_size UpperCamelCase : Any = feat_extract_norm UpperCamelCase : Optional[int] = feat_extract_activation UpperCamelCase : Optional[Any] = list(_lowerCAmelCase ) UpperCamelCase : int = list(_lowerCAmelCase ) UpperCamelCase : int = list(_lowerCAmelCase ) UpperCamelCase : Any = conv_bias UpperCamelCase : Tuple = num_conv_pos_embeddings UpperCamelCase : List[Any] = num_conv_pos_embedding_groups UpperCamelCase : Tuple = len(self.conv_dim ) UpperCamelCase : List[Any] = num_hidden_layers UpperCamelCase : Dict = intermediate_size UpperCamelCase : str = squeeze_factor UpperCamelCase : str = hidden_act UpperCamelCase : List[Any] = num_attention_heads UpperCamelCase : Optional[int] = hidden_dropout UpperCamelCase : Optional[Any] = attention_dropout UpperCamelCase : str = activation_dropout UpperCamelCase : int = feat_proj_dropout UpperCamelCase : int = final_dropout UpperCamelCase : int = layerdrop UpperCamelCase : Any = layer_norm_eps UpperCamelCase : str = initializer_range UpperCamelCase : Union[str, Any] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCamelCase : Tuple = apply_spec_augment UpperCamelCase : Union[str, Any] = mask_time_prob UpperCamelCase : List[str] = mask_time_length UpperCamelCase : Union[str, Any] = mask_time_min_masks UpperCamelCase : Any = mask_feature_prob UpperCamelCase : int = mask_feature_length UpperCamelCase : int = mask_feature_min_masks # ctc loss UpperCamelCase : Tuple = ctc_loss_reduction UpperCamelCase : Dict = ctc_zero_infinity # sequence classification UpperCamelCase : List[str] = use_weighted_layer_sum UpperCamelCase : List[Any] = classifier_proj_size @property def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
173
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
50
0
class MaxFenwickTree:
    """Fenwick-tree-style structure supporting point updates and max range
    queries; the query's right bound is exclusive. The class name and the
    three-argument max in update() are reconstructed from the obfuscated dump."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, self.arr[index], self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
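# Usage sketch for the structure above (names follow the reconstruction, since
# the dump obfuscated the original class name).
fenwick = MaxFenwickTree(10)
fenwick.update(2, 7)
fenwick.update(5, 3)
print(fenwick.query(0, 6))   # max over indices [0, 6)  -> 7
print(fenwick.query(3, 10))  # max over indices [3, 10) -> 3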
457
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
50
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings snake_case_ = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. 
Usually set to\n `eos_token_id`.\n' @add_start_docstrings(_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : int = 'rag' A_ : List[str] = True def __init__(self : List[str] , a__ : Tuple=None , a__ : Optional[int]=True , a__ : int=None , a__ : Union[str, Any]=None , a__ : Any=None , a__ : int=None , a__ : List[str]=None , a__ : str=" / " , a__ : Tuple=" // " , a__ : Any=5 , a__ : List[str]=300 , a__ : Dict=768 , a__ : Optional[int]=8 , a__ : str="wiki_dpr" , a__ : Union[str, Any]="train" , a__ : Tuple="compressed" , a__ : Dict=None , a__ : Any=None , a__ : Optional[Any]=False , a__ : Dict=False , a__ : str=0.0 , a__ : List[Any]=True , a__ : Optional[int]=False , a__ : List[Any]=False , a__ : Dict=False , a__ : Any=True , a__ : Union[str, Any]=None , **a__ : str , ): """simple docstring""" super().__init__( bos_token_id=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , prefix=_lowerCAmelCase , vocab_size=_lowerCAmelCase , **_lowerCAmelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" __snake_case = kwargs.pop('''question_encoder''' ) __snake_case = question_encoder_config.pop('''model_type''' ) __snake_case = kwargs.pop('''generator''' ) __snake_case = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig __snake_case = AutoConfig.for_model(_lowerCAmelCase , **_lowerCAmelCase ) __snake_case = AutoConfig.for_model(_lowerCAmelCase , **_lowerCAmelCase ) __snake_case = reduce_loss __snake_case = label_smoothing __snake_case = exclude_bos_score __snake_case = do_marginalize __snake_case = title_sep __snake_case = doc_sep __snake_case = n_docs __snake_case = max_combined_length __snake_case = dataset __snake_case = dataset_split __snake_case = index_name __snake_case = retrieval_vector_size __snake_case = retrieval_batch_size __snake_case = passages_path __snake_case = index_path __snake_case = use_dummy_dataset __snake_case = output_retrieved __snake_case = do_deduplication __snake_case = use_cache if self.forced_eos_token_id is None: __snake_case = getattr(self.generator , '''forced_eos_token_id''' , _lowerCAmelCase ) @classmethod def a (cls : int , a__ : Optional[int] , a__ : Tuple , **a__ : List[Any] ): """simple docstring""" return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_lowerCAmelCase ) def a (self : Tuple ): """simple docstring""" __snake_case = copy.deepcopy(self.__dict__ ) __snake_case = self.question_encoder.to_dict() __snake_case = self.generator.to_dict() __snake_case = self.__class__.model_type return output
592
'''simple docstring''' def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ): return numa ^ numb < 0 if __name__ == "__main__": import doctest doctest.testmod()
50
0
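With the operand fixed above (the renaming pass had collapsed both parameters to numa, making the XOR identically zero), the row is the classic opposite-signs check. A short sketch of why the trick works: Python's bitwise operators follow two's-complement semantics, so num1 ^ num2 is negative exactly when the two sign bits differ:

def different_signs(num1, num2):
    # the sign bit of num1 ^ num2 is set exactly when the operands disagree in sign
    return (num1 ^ num2) < 0

assert different_signs(-3, 7)
assert not different_signs(4, 9)
assert not different_signs(-4, -9)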
'''simple docstring''' def __UpperCamelCase ( lowercase_ : str , lowercase_ : bool = False ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): a_ = F'Expected string as input, found {type(__lowerCAmelCase )}' raise ValueError(__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): a_ = F'Expected boolean as use_pascal parameter, found {type(__lowerCAmelCase )}' raise ValueError(__lowerCAmelCase ) a_ = input_str.split('_' ) a_ = 0 if use_pascal else 1 a_ = words[start_index:] a_ = [word[0].upper() + word[1:] for word in words_to_capitalize] a_ = '' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
536
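A de-obfuscated sketch of the same split-and-capitalize conversion, assuming the input has no empty '_'-delimited segments (snake_to_camel_case is an illustrative name):

def snake_to_camel_case(input_str, use_pascal=False):
    words = input_str.split("_")
    # PascalCase capitalizes every word; camelCase keeps the first one as-is
    start_index = 0 if use_pascal else 1
    initial_word = "" if use_pascal else words[0]
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    return initial_word + "".join(capitalized)

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"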
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : str = logging.get_logger(__name__) lowercase__ : str = { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json' ), } class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' lowerCAmelCase_ = '''dpr''' def __init__( self : Tuple , __lowercase : int=3_05_22 , __lowercase : int=7_68 , __lowercase : Tuple=12 , __lowercase : int=12 , __lowercase : str=30_72 , __lowercase : str="gelu" , __lowercase : Any=0.1 , __lowercase : str=0.1 , __lowercase : Optional[int]=5_12 , __lowercase : str=2 , __lowercase : Optional[Any]=0.02 , __lowercase : str=1E-12 , __lowercase : Dict=0 , __lowercase : Union[str, Any]="absolute" , __lowercase : Any = 0 , **__lowercase : Optional[Any] , ): """simple docstring""" super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = projection_dim snake_case_ = position_embedding_type
376
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCamelCase : int = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
50
0
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Any: SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = 250 SCREAMING_SNAKE_CASE__ = ids_tensor((batch_size, length) , _lowerCAmelCase ) SCREAMING_SNAKE_CASE__ = torch.ones((batch_size, length) , device=_lowerCAmelCase , dtype=torch.float ) / length return input_ids, scores def lowercase_ ( self : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(5 ) SCREAMING_SNAKE_CASE__ = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(9 ) self.assertFalse(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(10 ) self.assertTrue(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) def lowercase_ ( self : Union[str, Any] ) -> Any: SCREAMING_SNAKE_CASE__ = MaxLengthCriteria(max_length=10 ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(5 ) self.assertFalse(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(9 ) self.assertFalse(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(10 ) self.assertTrue(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) def lowercase_ ( self : Dict ) -> List[str]: SCREAMING_SNAKE_CASE__ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(5 ) self.assertFalse(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(9 ) self.assertFalse(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(10 ) self.assertTrue(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def lowercase_ ( self : List[str] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self._get_tensors(5 ) SCREAMING_SNAKE_CASE__ = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(_lowerCAmelCase , _lowerCAmelCase ) ) def lowercase_ ( self : Any ) -> Tuple: validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(_lowerCAmelCase ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) SCREAMING_SNAKE_CASE__ = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(_lowerCAmelCase ) , 1 )
493
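The tests in the row above exercise the stopping-criteria protocol: every criterion is a callable, and a list of them stops generation as soon as any one fires. A minimal sketch of that protocol with plain lists standing in for token tensors (simplified signatures, not the transformers classes):

import time

class MaxLengthCriteria:
    def __init__(self, max_length):
        self.max_length = max_length

    def __call__(self, sequence):
        return len(sequence) >= self.max_length

class MaxTimeCriteria:
    def __init__(self, max_time, initial_timestamp=None):
        self.max_time = max_time
        self.start = time.time() if initial_timestamp is None else initial_timestamp

    def __call__(self, sequence):
        return time.time() - self.start > self.max_time

class StoppingCriteriaList(list):
    def __call__(self, sequence):
        # stop as soon as any single criterion fires
        return any(criterion(sequence) for criterion in self)

criteria = StoppingCriteriaList([MaxLengthCriteria(10), MaxTimeCriteria(0.1)])
assert not criteria(list(range(5)))   # short and fast: keep generating
assert criteria(list(range(10)))      # max length reached: stop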
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'gpt_bigcode' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = scale_attn_weights lowerCamelCase__ = use_cache lowerCamelCase__ = attention_softmax_in_fpaa lowerCamelCase__ = scale_attention_softmax_in_fpaa lowerCamelCase__ = multi_query lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
50
0
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __magic_name__ ( __a : Union[str, Any] , __a : Any ): '''simple docstring''' assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def __magic_name__ ( __a : Union[str, Any] , __a : Tuple , __a : Dict ): '''simple docstring''' UpperCamelCase__ = tmp_path / """cache""" UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def __magic_name__ ( __a : Optional[Any] , __a : Union[str, Any] , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = tmp_path / """cache""" UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase__ = features.copy() if features else default_expected_features UpperCamelCase__ = ( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def __magic_name__ ( __a : Any , __a : int , __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = tmp_path / """cache""" UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , split=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def __magic_name__ ( __a : Any , __a : List[str] , __a : List[Any] ): '''simple docstring''' if issubclass(__lowerCAmelCase , __lowerCAmelCase ): UpperCamelCase__ = parquet_path elif issubclass(__lowerCAmelCase , __lowerCAmelCase ): UpperCamelCase__ = [parquet_path] UpperCamelCase__ = tmp_path / """cache""" UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , 
__lowerCAmelCase ) def __magic_name__ ( __a : Any , __a : str , __a : Any=("train",) ): '''simple docstring''' assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) for split in splits: UpperCamelCase__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def __magic_name__ ( __a : Optional[int] , __a : str , __a : Any ): '''simple docstring''' UpperCamelCase__ = tmp_path / """cache""" UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase__ = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def __magic_name__ ( __a : str , __a : Any , __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = tmp_path / """cache""" UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase__ = features.copy() if features else default_expected_features UpperCamelCase__ = ( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase__ = ParquetDatasetReader({"""train""": parquet_path} , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def __magic_name__ ( __a : str , __a : Dict , __a : List[Any] ): '''simple docstring''' if split: UpperCamelCase__ = {split: parquet_path} else: UpperCamelCase__ = """train""" UpperCamelCase__ = {"""train""": parquet_path, """test""": parquet_path} UpperCamelCase__ = tmp_path / """cache""" UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __magic_name__ ( __a : Any , __a : Any ): '''simple docstring''' UpperCamelCase__ = ParquetDatasetWriter(__lowerCAmelCase , tmp_path / """foo.parquet""" ) assert writer.write() > 0 UpperCamelCase__ = pq.ParquetFile(tmp_path / """foo.parquet""" ) UpperCamelCase__ = pf.read() assert dataset.data.table == output_table def __magic_name__ ( __a : Dict , __a : Any ): '''simple docstring''' UpperCamelCase__ = str(shared_datadir / """test_image_rgb.jpg""" ) UpperCamelCase__ = {"""image""": [image_path]} UpperCamelCase__ = Features({"""image""": Image()} ) UpperCamelCase__ = Dataset.from_dict(__lowerCAmelCase , features=__lowerCAmelCase ) UpperCamelCase__ = ParquetDatasetWriter(__lowerCAmelCase 
, tmp_path / """foo.parquet""" ) assert writer.write() > 0 UpperCamelCase__ = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features UpperCamelCase__ = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__lowerCAmelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __magic_name__ ( __a : Tuple , __a : int ): '''simple docstring''' assert get_writer_batch_size(__lowerCAmelCase ) == expected
513
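The tests above round-trip datasets through Parquet. A minimal sketch of the same round trip using pyarrow directly, mirroring the col_1/col_2/col_3 fixture (string, int64, float64):

import os
import tempfile

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({
    "col_1": ["a", "b", "c", "d"],   # string
    "col_2": [1, 2, 3, 4],           # int64
    "col_3": [1.0, 2.0, 3.0, 4.0],   # float64
})
path = os.path.join(tempfile.mkdtemp(), "foo.parquet")
pq.write_table(table, path)

reloaded = pq.read_table(path)
assert reloaded.num_rows == 4
assert reloaded.column_names == ["col_1", "col_2", "col_3"]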
'''simple docstring''' from PIL import Image def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ): def brightness(__lowerCAmelCase : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__lowerCAmelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
50
0
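A cleaned-up sketch of the brightness filter in the row above. Note that 128 + level + (c - 128) reduces to c + level, so the transform is a plain per-channel shift; a tiny generated image stands in for the lena.jpg file, which is not available here:

from PIL import Image

def change_brightness(img, level):
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # img.point applies the mapping to every channel value in the image
    return img.point(lambda c: 128 + level + (c - 128))

demo = Image.new("RGB", (2, 2), color=(100, 100, 100))
brighter = change_brightness(demo, 50)
assert brighter.getpixel((0, 0)) == (150, 150, 150)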
'''simple docstring''' def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> Dict: """simple docstring""" return numa ^ numb < 0 if __name__ == "__main__": import doctest doctest.testmod()
405
'''simple docstring''' def A__ ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCamelCase : Dict = generate_large_matrix() UpperCamelCase : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A__ ( __lowerCAmelCase : list[list[int]] ): assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid ) assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase__ = (left + right) // 2 lowerCamelCase__ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase__ = mid + 1 else: lowerCamelCase__ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(grid[0] ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = find_negative_index(grid[i][:bound] ) total += bound return (len(__lowerCAmelCase ) * len(grid[0] )) - total def A__ ( __lowerCAmelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 for row in grid: for i, number in enumerate(__lowerCAmelCase ): if number < 0: total += len(__lowerCAmelCase ) - i break return total def A__ ( ): from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase__ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
50
0
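A compact sketch of the binary-search variant benchmarked above. Each row is sorted in decreasing order, so its negatives form a suffix; reversing the row makes it ascending, and bisect_left(row, 0) on the reversed row counts exactly the strictly negative entries (the reversal itself is linear in this simplified form):

from bisect import bisect_left

def count_negatives(grid):
    # sum the per-row negative counts obtained by binary search
    return sum(bisect_left(row[::-1], 0) for row in grid)

assert count_negatives([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8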
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : List[Any] , lowercase : List[str] , lowercase : List[Any] ) -> int: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __snake_case : int = np.full((len(__lowerCAmelCase ), sequence_length, 2) , __lowerCAmelCase ) else: __snake_case : Optional[int] = np.full((len(__lowerCAmelCase ), sequence_length) , __lowerCAmelCase ) for i, tensor in enumerate(__lowerCAmelCase ): if padding_side == "right": if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __snake_case : int = tensor[:sequence_length] else: __snake_case : str = tensor[:sequence_length] else: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __snake_case : List[str] = tensor[:sequence_length] else: __snake_case : Dict = tensor[:sequence_length] return out_tensor.tolist() def lowerCAmelCase__( lowercase : int ) -> Dict: __snake_case : str = ord(__lowerCAmelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True __snake_case : Any = unicodedata.category(__lowerCAmelCase ) if cat.startswith("P" ): return True return False @dataclass class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : List[Any] =42 UpperCAmelCase_ : Union[str, Any] =True UpperCAmelCase_ : Optional[int] =None UpperCAmelCase_ : Dict =None UpperCAmelCase_ : Tuple =-100 UpperCAmelCase_ : int ="pt" def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' import torch __snake_case : Optional[int] = "label" if "label" in features[0].keys() else "labels" __snake_case : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __snake_case : Dict = self.tokenizer.pad( _lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch __snake_case : int = torch.tensor(batch["entity_ids"] ).shape[1] __snake_case : List[Any] = self.tokenizer.padding_side if padding_side == "right": __snake_case : Optional[int] = [ list(_lowerCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(_lowerCAmelCase )) for label in labels ] else: __snake_case : Any = [ [self.label_pad_token_id] * (sequence_length - len(_lowerCAmelCase )) + list(_lowerCAmelCase ) for label in labels ] __snake_case : Tuple = [feature["ner_tags"] for feature in features] __snake_case : List[Any] = padding_tensor(_lowerCAmelCase , -1 , _lowerCAmelCase , _lowerCAmelCase ) __snake_case : Union[str, Any] = [feature["original_entity_spans"] for feature in features] __snake_case : Union[str, Any] = padding_tensor(_lowerCAmelCase , (-1, -1) , _lowerCAmelCase , _lowerCAmelCase ) __snake_case : int = {k: torch.tensor(_lowerCAmelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
243
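A standalone sketch of the padding_tensor helper the collator above relies on, assuming non-empty plain-list sequences rather than tensors:

import numpy as np

def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # pad (or truncate) every sequence to sequence_length with padding_value
    out = np.full((len(sequences), sequence_length), padding_value)
    for i, seq in enumerate(sequences):
        seq = list(seq)[:sequence_length]
        if padding_side == "right":
            out[i, : len(seq)] = seq
        else:
            out[i, sequence_length - len(seq) :] = seq
    return out.tolist()

assert padding_tensor([[1, 2], [3]], -100, "right", 3) == [[1, 2, -100], [3, -100, -100]]
assert padding_tensor([[1, 2]], -100, "left", 3) == [[-100, 1, 2]]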
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase : List[Any] = 'examples/' UpperCamelCase : int = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCamelCase : Any = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } UpperCamelCase : Any = 'README.md' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern] lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase ) lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): for folder, directories, fnames in os.walk(__lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not patch: update_version_in_examples(__lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ = """1. Want to contribute a new model?""" with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Find the start of the list. lowerCamelCase__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) def A__ ( ): with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0] return packaging.version.parse(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ = default_version.base_version elif patch: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = default_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def A__ ( ): lowerCamelCase__ = get_version() lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ = current_version.base_version # Check with the user we got that right. lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCamelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
50
0
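The release script works by anchored multiline regex substitutions like the patterns above. A small sketch of the core step for the init pattern (update_init_version is an illustrative name):

import re

RE_INIT_VERSION = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)

def update_init_version(text, version):
    # rewrite the __version__ assignment, leaving the rest of the file untouched
    return RE_INIT_VERSION.sub(f'__version__ = "{version}"', text)

src = 'name = "transformers"\n__version__ = "4.0.0.dev0"\n'
assert update_init_version(src, "4.0.0") == 'name = "transformers"\n__version__ = "4.0.0"\n'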
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a_ ( UpperCAmelCase__ ): def lowercase__ ( self : Dict ): __snake_case = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCAmelCase , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase , 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase , 'num_attention_heads' ) ) class a_ : def __init__( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int]=1_3 , __lowerCAmelCase : Optional[int]=3_2 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Union[str, Any]=6_4_0 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : List[str]="silu" , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : Optional[int]=3_2 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=None , ): __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = last_hidden_size __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = conv_kernel_size __snake_case = output_stride __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = classifier_dropout_prob __snake_case = use_labels __snake_case = is_training __snake_case = num_labels __snake_case = initializer_range __snake_case = scope def lowercase__ ( self : Tuple ): __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.num_labels ) __snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase__ ( self : Dict ): return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowercase__ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ): __snake_case = MobileViTModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case = 
model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase__ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ): __snake_case = self.num_labels __snake_case = MobileViTForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ): __snake_case = self.num_labels __snake_case = MobileViTForSemanticSegmentation(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case = model(_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __snake_case = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase__ ( self : Dict ): __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): lowercase_ : Optional[Any] = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) lowercase_ : Optional[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) lowercase_ : Dict = False lowercase_ : Dict = False lowercase_ : List[str] = False lowercase_ : Optional[int] = False def lowercase__ ( self : Any ): __snake_case = MobileViTModelTester(self ) __snake_case = MobileViTConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase ) def lowercase__ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def lowercase__ ( self : int ): pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def lowercase__ ( self : Dict ): pass @unittest.skip(reason='MobileViT does not output attentions' ) def lowercase__ ( self : Dict ): pass def lowercase__ ( self : Optional[int] ): __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(_lowerCAmelCase ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase__ ( self : Any ): pass def lowercase__ ( self : Tuple ): __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def lowercase__ ( self : List[Any] ): def check_hidden_states_output(__lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ): __snake_case = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) __snake_case = outputs.hidden_states __snake_case = 5 self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __snake_case = 2 for i in range(len(_lowerCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def lowercase__ ( self : List[str] ): __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) def lowercase__ ( self : Optional[int] ): __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase ) @slow def lowercase__ ( self : Dict ): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = MobileViTModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def lowerCamelCase__ ( ): __snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a_ ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ): return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def lowercase__ ( self : Union[str, Any] ): __snake_case = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_lowerCAmelCase ) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): __snake_case = model(**_lowerCAmelCase ) # verify the logits __snake_case = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __snake_case = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def lowercase__ ( self : Union[str, Any] ): __snake_case = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case = model.to(_lowerCAmelCase ) __snake_case = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case = prepare_img() __snake_case = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase ) # 
forward pass with torch.no_grad(): __snake_case = model(**_lowerCAmelCase ) __snake_case = outputs.logits # verify the logits __snake_case = torch.Size((1, 2_1, 3_2, 3_2) ) self.assertEqual(logits.shape , _lowerCAmelCase ) __snake_case = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] , device=_lowerCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def lowercase__ ( self : Tuple ): __snake_case = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case = model.to(_lowerCAmelCase ) __snake_case = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case = prepare_img() __snake_case = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): __snake_case = model(**_lowerCAmelCase ) __snake_case = outputs.logits.detach().cpu() __snake_case = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(5_0, 6_0)] ) __snake_case = torch.Size((5_0, 6_0) ) self.assertEqual(segmentation[0].shape , _lowerCAmelCase ) __snake_case = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase ) __snake_case = torch.Size((3_2, 3_2) ) self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
356
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * 
[0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
50
0
'''simple docstring''' def lowerCamelCase ( lowerCAmelCase : list[int] , lowerCAmelCase : int ): """simple docstring""" __magic_name__ : int = len(__lowerCAmelCase ) __magic_name__ : Any = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __magic_name__ : int = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): __magic_name__ : Tuple = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: __magic_name__ : Optional[int] = subset[i - 1][j] if arr[i - 1] <= j: __magic_name__ : int = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
561
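The row above fills the classic 2-D subset-sum table. A sketch of the standard 1-D rolling-array variant of the same DP, iterating target sums downwards so each element is used at most once (an alternative formulation, not a reconstruction of the code above):

def is_sum_subset(arr, required_sum):
    # dp[j] is True when some subset of the elements seen so far sums to j
    dp = [False] * (required_sum + 1)
    dp[0] = True  # the empty subset
    for value in arr:
        # iterate j downwards so each element is used at most once
        for j in range(required_sum, value - 1, -1):
            dp[j] = dp[j] or dp[j - value]
    return dp[required_sum]

assert is_sum_subset([2, 4, 6, 8], 5) is False
assert is_sum_subset([2, 4, 6, 8], 14) is True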
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and are handled # like all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
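To see what the saved ref file encodes: LTP segments each line into whole words, add_sub_symbol rewrites any BERT subword that continues a Chinese word with a "##" prefix, and only the positions of those continuation pieces are written out. A minimal, illustrative trace (the sentence and its token split are made up):

bert_tokens = ["[CLS]", "天", "气", "真", "好", "[SEP]"]   # per-character BERT pieces
ltp_words = {"天气"}                                       # LTP treats 天气 as one word
# add_sub_symbol rewrites position 2: ["[CLS]", "天", "##气", "真", "好", "[SEP]"]
# ref ids saved for this line: [2]   (index of the "##" continuation piece)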
50
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'facebook/data2vec-vision-base-ft': ( 'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json' ), } class UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" __SCREAMING_SNAKE_CASE = '''data2vec-vision''' def __init__( self , lowerCamelCase=7_68 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=30_72 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=2_24 , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=True , lowerCamelCase=[3, 5, 7, 11] , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=2_56 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=2_55 , **lowerCamelCase , ) -> Dict: '''simple docstring''' super().__init__(**_lowerCAmelCase ) UpperCamelCase : List[Any] = hidden_size UpperCamelCase : int = num_hidden_layers UpperCamelCase : Optional[int] = num_attention_heads UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : List[str] = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : int = attention_probs_dropout_prob UpperCamelCase : List[Any] = initializer_range UpperCamelCase : str = layer_norm_eps UpperCamelCase : Dict = image_size UpperCamelCase : Union[str, Any] = patch_size UpperCamelCase : List[Any] = num_channels UpperCamelCase : Tuple = use_mask_token UpperCamelCase : List[Any] = use_absolute_position_embeddings UpperCamelCase : Union[str, Any] = use_relative_position_bias UpperCamelCase : Any = use_shared_relative_position_bias UpperCamelCase : List[Any] = layer_scale_init_value UpperCamelCase : Optional[int] = drop_path_rate UpperCamelCase : Optional[Any] = use_mean_pooling # decode head attributes (semantic segmentation) UpperCamelCase : str = out_indices UpperCamelCase : Any = pool_scales # auxiliary head attributes (semantic segmentation) UpperCamelCase : Optional[Any] = use_auxiliary_head UpperCamelCase : List[str] = auxiliary_loss_weight UpperCamelCase : Optional[Any] = auxiliary_channels UpperCamelCase : Any = auxiliary_num_convs UpperCamelCase : Tuple = auxiliary_concat_input UpperCamelCase : List[str] = semantic_loss_ignore_index class UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" __SCREAMING_SNAKE_CASE = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' return 1e-4
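Given the model_type "data2vec-vision", this appears to be the Data2VecVisionConfig shipped with transformers; assuming that mapping, constructing a model from it is the usual two-step sketch:

from transformers import Data2VecVisionConfig, Data2VecVisionModel

config = Data2VecVisionConfig(image_size=224, patch_size=16, hidden_size=768)
model = Data2VecVisionModel(config)  # randomly initialized, not pretrained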
173
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" 
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
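The conversion entry point is argparse-driven; a hypothetical invocation (the script filename and output directory are placeholders, the flags match the parser above):

python convert_dpt_hybrid_to_pytorch.py \
    --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
    --pytorch_dump_folder_path ./dpt-large \
    --model_name dpt-large \
    --show_prediction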
50
0
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers with least-significant-digit radix sort.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
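A quick self-check of the sort against Python's built-in sorted (using the function as rewritten above):

import random

data = [random.randrange(10_000) for _ in range(100)]
assert radix_sort(list(data)) == sorted(data)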
457
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
0
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def a (self : Dict ): """simple docstring""" torch.manual_seed(0 ) __snake_case = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def a (self : Dict ): """simple docstring""" __snake_case = self.dummy_uncond_unet __snake_case = ScoreSdeVeScheduler() __snake_case = ScoreSdeVePipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) sde_ve.to(_lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=_lowerCAmelCase ) __snake_case = torch.manual_seed(0 ) __snake_case = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_lowerCAmelCase ).images __snake_case = torch.manual_seed(0 ) __snake_case = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_lowerCAmelCase , return_dict=_lowerCAmelCase )[ 0 ] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __snake_case = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Optional[Any] ): """simple docstring""" __snake_case = '''google/ncsnpp-church-256''' __snake_case = UNetaDModel.from_pretrained(_lowerCAmelCase ) __snake_case = ScoreSdeVeScheduler.from_pretrained(_lowerCAmelCase ) __snake_case = ScoreSdeVePipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) sde_ve.to(_lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=_lowerCAmelCase ) __snake_case = torch.manual_seed(0 ) __snake_case = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=_lowerCAmelCase ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __snake_case = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
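Outside the test harness, the same pipeline is available through the public diffusers API; a minimal sketch:

import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe(num_inference_steps=10, generator=torch.manual_seed(0)).images[0]
image.save("church.png")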
592
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
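Given the model_type "codegen", the pair above appears to correspond to CodeGenConfig and its ONNX export config in transformers; assuming that, a tiny model can be built directly from the config:

from transformers import CodeGenConfig, CodeGenForCausalLM

config = CodeGenConfig(n_embd=256, n_head=4, n_layer=4)  # a deliberately small variant
model = CodeGenForCausalLM(config)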
50
0
'''simple docstring''' import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def _a ( self ): """simple docstring""" a_ = 'hf-internal-testing/tiny-random-t5' a_ = AutoTokenizer.from_pretrained(_lowerCAmelCase ) a_ = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ) a_ = tokenizer('This is me' , return_tensors='pt' ) a_ = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) a_ = model.generate(**_lowerCAmelCase ) a_ = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ) a_ = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) a_ = model_reloaded.generate(**_lowerCAmelCase ) self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase ) ) def _a ( self ): """simple docstring""" a_ = 'hf-internal-testing/tiny-random-t5' a_ = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ) a_ = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_lowerCAmelCase ): model.save_pretrained(_lowerCAmelCase ) a_ = model.reverse_bettertransformer() model.save_pretrained(_lowerCAmelCase )
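The round trip under test is the optimum integration exposed on every PreTrainedModel; in user code (with optimum installed) it is simply:

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swap in fused attention kernels
# ... run inference ...
model = model.reverse_bettertransformer()   # restore the canonical layout
model.save_pretrained("./tiny-t5")          # safe to save only after reversing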
536
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer lowercase__ : str = logging.get_logger(__name__) lowercase__ : Union[str, Any] = {'vocab_file': 'vocab.txt'} lowercase__ : int = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } lowercase__ : Tuple = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } lowercase__ : Dict = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ConvBertTokenizer def __init__( self : str , __lowercase : Optional[Any]=None , __lowercase : int=None , __lowercase : List[str]=True , __lowercase : Dict="[UNK]" , __lowercase : int="[SEP]" , __lowercase : Dict="[PAD]" , __lowercase : Union[str, Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : List[str]=True , __lowercase : List[Any]=None , **__lowercase : str , ): """simple docstring""" super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , _lowerCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , _lowerCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , _lowerCAmelCase ) != tokenize_chinese_chars ): snake_case_ = getattr(_lowerCAmelCase , normalizer_state.pop("type" ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**_lowerCAmelCase ) snake_case_ = do_lower_case def snake_case__ ( self : str , __lowercase : Union[str, Any] , __lowercase : str=None ): """simple docstring""" snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__ ( self : Union[str, Any] , __lowercase : Dict , __lowercase : int = None ): """simple docstring""" snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__ ( self : Any , __lowercase : Optional[int] , __lowercase : Tuple = None ): """simple docstring""" snake_case_ = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
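Assuming this is the ConvBertTokenizerFast published in transformers, it behaves like any BERT-style WordPiece tokenizer:

from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tokenizer("Hello world", return_tensors="pt")
# expected shape (1, 4): [CLS] hello world [SEP], assuming both words are in-vocab
print(enc.input_ids.shape)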
376
'''simple docstring'''

from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to fp16 and write it back to disk."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
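Because the script dispatches through fire.Fire, it runs straight from the shell; a hypothetical invocation (the script name and paths are placeholders):

python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin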
50
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer _SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _SCREAMING_SNAKE_CASE : Union[str, Any] = { 'vocab_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-german-cased': ( 'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json' ), 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json' ), }, } _SCREAMING_SNAKE_CASE : List[str] = { 'distilbert-base-uncased': 512, 'distilbert-base-uncased-distilled-squad': 512, 'distilbert-base-cased': 512, 'distilbert-base-cased-distilled-squad': 512, 'distilbert-base-german-cased': 512, 'distilbert-base-multilingual-cased': 512, } _SCREAMING_SNAKE_CASE : int = { 'distilbert-base-uncased': {'do_lower_case': True}, 'distilbert-base-uncased-distilled-squad': {'do_lower_case': True}, 'distilbert-base-cased': {'do_lower_case': False}, 'distilbert-base-cased-distilled-squad': {'do_lower_case': False}, 'distilbert-base-german-cased': {'do_lower_case': False}, 'distilbert-base-multilingual-cased': {'do_lower_case': False}, } class UpperCAmelCase__ ( A__ ): """simple docstring""" a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = PRETRAINED_INIT_CONFIGURATION a = ["input_ids", "attention_mask"] a = DistilBertTokenizer def __init__( self : Tuple , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : str=True , __lowerCamelCase : Optional[Any]="[UNK]" , __lowerCamelCase : Optional[int]="[SEP]" , __lowerCamelCase : str="[PAD]" , __lowerCamelCase : int="[CLS]" , __lowerCamelCase : Any="[MASK]" , __lowerCamelCase : str=True , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : Dict , ) -> Dict: super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , 
strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _lowerCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _lowerCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _lowerCAmelCase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE__ = getattr(_lowerCAmelCase , normalizer_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE__ = do_lower_case SCREAMING_SNAKE_CASE__ = strip_accents SCREAMING_SNAKE_CASE__ = tokenize_chinese_chars SCREAMING_SNAKE_CASE__ = normalizer_class(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE__ = do_lower_case def lowercase_ ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase_ ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any = None ) -> str: SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase_ ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] = None ) -> Optional[int]: SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
493
'''simple docstring'''

import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
50
0
import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """encodec""" def __init__(self , SCREAMING_SNAKE_CASE_=[1.5, 3.0, 6.0, 12.0, 24.0] , SCREAMING_SNAKE_CASE_=2_40_00 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=[8, 5, 4, 2] , SCREAMING_SNAKE_CASE_="weight_norm" , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="reflect" , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = target_bandwidths UpperCamelCase__ = sampling_rate UpperCamelCase__ = audio_channels UpperCamelCase__ = normalize UpperCamelCase__ = chunk_length_s UpperCamelCase__ = overlap UpperCamelCase__ = hidden_size UpperCamelCase__ = num_filters UpperCamelCase__ = num_residual_layers UpperCamelCase__ = upsampling_ratios UpperCamelCase__ = norm_type UpperCamelCase__ = kernel_size UpperCamelCase__ = last_kernel_size UpperCamelCase__ = residual_kernel_size UpperCamelCase__ = dilation_growth_rate UpperCamelCase__ = use_causal_conv UpperCamelCase__ = pad_mode UpperCamelCase__ = compress UpperCamelCase__ = num_lstm_layers UpperCamelCase__ = trim_right_ratio UpperCamelCase__ = codebook_size UpperCamelCase__ = codebook_dim if codebook_dim is not None else hidden_size UpperCamelCase__ = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( F"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" ) super().__init__(**_lowerCAmelCase ) @property def UpperCAmelCase_ (self ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def UpperCAmelCase_ (self ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def UpperCAmelCase_ (self ): UpperCamelCase__ = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def UpperCAmelCase_ (self ): return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
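Tracing the default values through the derived properties above: the hop length is the product of the upsampling ratios, 8 * 5 * 4 * 2 = 320 samples, so at 24 kHz the frame rate is ceil(24000 / 320) = 75, and the quantizer count works out to int(1000 * 24.0 // (75 * 10)) = 32:

import math

upsampling_ratios = [8, 5, 4, 2]
sampling_rate = 24_000
hop_length = math.prod(upsampling_ratios)                # 320
frame_rate = math.ceil(sampling_rate / hop_length)       # 75
num_quantizers = int(1000 * 24.0 // (frame_rate * 10))   # 32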
513
'''simple docstring'''


def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select a maximum set of mutually compatible activities.

    Assumes the activities are already sorted by finish time.
    """
    n = len(finish)
    print("The following activities are selected:")
    # the first activity is always selected
    i = 0
    print(i, end=",")
    # consider the rest of the activities
    for j in range(n):
        # if this activity starts at or after the finish time of the
        # previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
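Run on the sample data in the __main__ block, the greedy scan selects activities 0, 1, 3 and 4 (0-indexed), so it prints:

The following activities are selected:
0,1,3,4,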
50
0
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class A__ ( A__ ): A__ = ['input_features', 'attention_mask'] def __init__( self : Dict , _a : Optional[Any]=80 , _a : List[Any]=1_6000 , _a : Any=0.0 , _a : List[str]=10 , _a : int=25 , _a : Tuple="hamming_window" , _a : str=3_2768.0 , _a : int=0.97 , _a : List[str]=1.0 , _a : Union[str, Any]=True , _a : List[str]=True , _a : str=False , **_a : List[Any] , ) -> List[Any]: '''simple docstring''' super().__init__(feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , **_lowerCAmelCase ) _SCREAMING_SNAKE_CASE =feature_size _SCREAMING_SNAKE_CASE =sampling_rate _SCREAMING_SNAKE_CASE =padding_value _SCREAMING_SNAKE_CASE =hop_length _SCREAMING_SNAKE_CASE =win_length _SCREAMING_SNAKE_CASE =frame_signal_scale _SCREAMING_SNAKE_CASE =preemphasis_coeff _SCREAMING_SNAKE_CASE =mel_floor _SCREAMING_SNAKE_CASE =normalize_means _SCREAMING_SNAKE_CASE =normalize_vars _SCREAMING_SNAKE_CASE =win_function _SCREAMING_SNAKE_CASE =return_attention_mask _SCREAMING_SNAKE_CASE =win_length * sampling_rate // 1000 _SCREAMING_SNAKE_CASE =hop_length * sampling_rate // 1000 _SCREAMING_SNAKE_CASE =optimal_fft_length(self.sample_size ) _SCREAMING_SNAKE_CASE =(self.n_fft // 2) + 1 def A ( self : List[Any] , _a : int ) -> Tuple: '''simple docstring''' if self.win_function == "hamming_window": _SCREAMING_SNAKE_CASE =window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCAmelCase ) else: _SCREAMING_SNAKE_CASE =window_function(window_length=self.sample_size , name=self.win_function ) _SCREAMING_SNAKE_CASE =mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) _SCREAMING_SNAKE_CASE =spectrogram( one_waveform * self.frame_signal_scale , window=_lowerCAmelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_lowerCAmelCase , preemphasis=self.preemphasis_coeff , mel_filters=_lowerCAmelCase , mel_floor=self.mel_floor , log_mel='log' , ) return msfc_features.T def A ( self : int , _a : Dict , _a : Tuple , _a : Optional[int] ) -> Optional[int]: '''simple docstring''' if self.normalize_means: _SCREAMING_SNAKE_CASE =x[:input_length].mean(axis=0 ) _SCREAMING_SNAKE_CASE =np.subtract(_lowerCAmelCase , _lowerCAmelCase ) if self.normalize_vars: _SCREAMING_SNAKE_CASE =x[:input_length].std(axis=0 ) _SCREAMING_SNAKE_CASE =np.divide(_lowerCAmelCase , _lowerCAmelCase ) if input_length < x.shape[0]: _SCREAMING_SNAKE_CASE =padding_value # make sure array is in float32 _SCREAMING_SNAKE_CASE =x.astype(np.floataa ) return x def A ( self : Tuple , _a : Union[str, Any] , _a : int = None ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(_lowerCAmelCase , _lowerCAmelCase , self.padding_value ) for x, n in zip(_lowerCAmelCase , _lowerCAmelCase )] def __call__( self : Union[str, Any] , _a : List[Any] , _a : Union[str, Any] = False , _a 
: int = None , _a : Union[str, Any] = False , _a : Union[str, Any] = None , _a : Dict = None , _a : Union[str, Any] = None , _a : Tuple = None , **_a : Optional[Any] , ) -> List[Any]: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _SCREAMING_SNAKE_CASE =isinstance(_lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) _SCREAMING_SNAKE_CASE =is_batched_numpy or ( isinstance(_lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _SCREAMING_SNAKE_CASE =[np.asarray(_lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCAmelCase , np.ndarray ): _SCREAMING_SNAKE_CASE =np.asarray(_lowerCAmelCase , dtype=np.floataa ) elif isinstance(_lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _SCREAMING_SNAKE_CASE =raw_speech.astype(np.floataa ) # always return batch if not is_batched: _SCREAMING_SNAKE_CASE =[raw_speech] # extract fbank features _SCREAMING_SNAKE_CASE =[self._extract_mfsc_features(_lowerCAmelCase ) for one_waveform in raw_speech] # convert into correct format for padding _SCREAMING_SNAKE_CASE =BatchFeature({'input_features': features} ) _SCREAMING_SNAKE_CASE =self.pad( _lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , ) # make sure list is in array format _SCREAMING_SNAKE_CASE =padded_inputs.get('input_features' ) if isinstance(input_features[0] , _lowerCAmelCase ): _SCREAMING_SNAKE_CASE =[np.asarray(_lowerCAmelCase , dtype=np.floataa ) for feature in input_features] _SCREAMING_SNAKE_CASE =padded_inputs.get('attention_mask' ) if attention_mask is not None: _SCREAMING_SNAKE_CASE =[np.asarray(_lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: _SCREAMING_SNAKE_CASE =( np.array(_lowerCAmelCase , dtype=np.intaa ) if self._get_padding_strategies(_lowerCAmelCase , max_length=_lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) _SCREAMING_SNAKE_CASE =self.normalize( padded_inputs['input_features'] , attention_mask=_lowerCAmelCase ) if return_tensors is not None: _SCREAMING_SNAKE_CASE =padded_inputs.convert_to_tensors(_lowerCAmelCase ) return padded_inputs
405
'''simple docstring'''

import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
50
0
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _UpperCamelCase = logging.get_logger(__name__) @add_end_docstrings(a ) class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> str: '''simple docstring''' __snake_case : List[str] = {} __snake_case : List[str] = {} if prompt is not None: __snake_case : str = prompt if generate_kwargs is not None: __snake_case : Dict = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __snake_case : Optional[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter," " please use only one" ) __snake_case : Dict = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , UpperCAmelCase , **UpperCAmelCase ) -> str: '''simple docstring''' return super().__call__(_lowerCAmelCase , **_lowerCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = load_image(_lowerCAmelCase ) if prompt is not None: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise ValueError( F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """ "Note also that one single text can be provided for conditional image to text generation." 
) __snake_case : str = self.model.config.model_type if model_type == "git": __snake_case : List[str] = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) __snake_case : Optional[int] = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids __snake_case : str = [self.tokenizer.cls_token_id] + input_ids __snake_case : str = torch.tensor(_lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({"input_ids": input_ids} ) elif model_type == "pix2struct": __snake_case : List[Any] = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __snake_case : Optional[Any] = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) __snake_case : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(_lowerCAmelCase ) else: raise ValueError(F"""Model type {model_type} does not support conditional text generation""" ) else: __snake_case : List[str] = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __snake_case : List[Any] = None return model_inputs def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> List[str]: '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["input_ids"] , _lowerCAmelCase ) and all(x is None for x in model_inputs["input_ids"] ) ): __snake_case : List[str] = None if generate_kwargs is None: __snake_case : Dict = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __snake_case : Union[str, Any] = model_inputs.pop(self.model.main_input_name ) __snake_case : List[Any] = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase ) return model_outputs def UpperCAmelCase ( self , UpperCAmelCase ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = [] for output_ids in model_outputs: __snake_case : List[str] = { "generated_text": self.tokenizer.decode( _lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , ) } records.append(_lowerCAmelCase ) return records
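End to end, this pipeline is reachable through the standard factory; a minimal sketch (the caption text will vary by checkpoint):

from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# -> [{'generated_text': '...'}]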
243
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = [] for line in lines: lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase ) # remove comments if line: filtered_lines.append(__lowerCAmelCase ) lowerCamelCase__ = """\n""".join(__lowerCAmelCase ) # Make a hash from all this code lowerCamelCase__ = full_str.encode("""utf-8""" ) return shaaaa(__lowerCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCamelCase : Dict = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCamelCase : str = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCamelCase : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
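Given the tables above, picking a builder for a data file reduces to an extension lookup; a simplified sketch of that resolution step (using the _EXTENSION_TO_MODULE mapping defined above):

import os

def infer_module(filename: str, extension_to_module: dict):
    suffix = os.path.splitext(filename)[1]
    return extension_to_module.get(suffix) or extension_to_module.get(suffix.lower())

# infer_module("train.tsv", _EXTENSION_TO_MODULE) -> ('csv', {'sep': '\t'})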
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _lowercase = logging.get_logger(__name__) class a_ ( UpperCAmelCase__ ): lowercase_ : str = ['''pixel_values'''] def __init__( self : Any , __lowerCAmelCase : int = True , __lowerCAmelCase : str = None , __lowerCAmelCase : str = PILImageResampling.BILINEAR , __lowerCAmelCase : int = True , __lowerCAmelCase : str = None , __lowerCAmelCase : str = True , __lowerCAmelCase : List[str] = 1 / 2_5_5 , __lowerCAmelCase : str = True , __lowerCAmelCase : Tuple = None , __lowerCAmelCase : Tuple = None , **__lowerCAmelCase : Dict , ): super().__init__(**_lowerCAmelCase ) __snake_case = size if size is not None else {'shortest_edge': 2_5_6} __snake_case = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) __snake_case = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} __snake_case = get_size_dict(_lowerCAmelCase , param_name='crop_size' ) __snake_case = do_resize __snake_case = size __snake_case = resample __snake_case = do_center_crop __snake_case = crop_size __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_normalize __snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] = PILImageResampling.BICUBIC , __lowerCAmelCase : Dict = None , **__lowerCAmelCase : Optional[Any] , ): __snake_case = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) __snake_case = get_resize_output_image_size(_lowerCAmelCase , size=size['shortest_edge'] , default_to_square=_lowerCAmelCase ) return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def lowercase__ ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] = None , **__lowerCAmelCase : Any , ): __snake_case = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(_lowerCAmelCase , size=(size['height'], size['width']) , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str = None , **__lowerCAmelCase : List[Any] ): return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] = None , **__lowerCAmelCase : str , ): return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def lowercase__ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] = None , __lowerCAmelCase : Dict = None , __lowerCAmelCase : Union[str, Any] = None , __lowerCAmelCase : List[Any] = None , __lowerCAmelCase : List[Any] = None , __lowerCAmelCase : Tuple = None , __lowerCAmelCase : int = None , __lowerCAmelCase : int = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = None , __lowerCAmelCase : List[str] = None , __lowerCAmelCase : Tuple = ChannelDimension.FIRST , **__lowerCAmelCase : Dict , ): __snake_case = do_resize if do_resize is not None else self.do_resize __snake_case = size if size is not None else self.size __snake_case = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) __snake_case = resample if resample is not None else self.resample __snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case = crop_size if crop_size is not None else self.crop_size __snake_case = get_size_dict(_lowerCAmelCase , param_name='crop_size' ) __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = do_normalize if do_normalize is not None else self.do_normalize __snake_case = image_mean if image_mean is not None else self.image_mean __snake_case = image_std if image_std is not None else self.image_std __snake_case = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__snake_case = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: __snake_case = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images] if do_center_crop: __snake_case = [self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images] if do_rescale: __snake_case = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images] if do_normalize: __snake_case = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images] __snake_case = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] __snake_case = {'pixel_values': images} return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase ) def lowercase__ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict = None ): __snake_case = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_lowerCAmelCase ) != len(_lowerCAmelCase ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(_lowerCAmelCase ): __snake_case = target_sizes.numpy() __snake_case = [] for idx in range(len(_lowerCAmelCase ) ): __snake_case = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowerCAmelCase ) __snake_case = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_lowerCAmelCase ) else: __snake_case = logits.argmax(dim=1 ) __snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
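The semantic-segmentation post-processing above upsamples each logit map to its original image size before taking the per-pixel argmax. A minimal, self-contained sketch of that resize-then-argmax pattern (shapes are illustrative, not taken from any particular model):

import torch

# Fake logits for one image: (num_classes, height, width) at the model's low resolution.
logits = torch.randn(3, 32, 32)
target_size = (128, 96)  # original (height, width) of the input image

resized = torch.nn.functional.interpolate(
    logits.unsqueeze(0),  # add a batch dimension: (1, C, H, W)
    size=target_size,
    mode="bilinear",
    align_corners=False,
)
segmentation_map = resized[0].argmax(dim=0)  # (128, 96) map of class ids
assert segmentation_map.shape == torch.Size(target_size)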
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sort `arr` by repeatedly extracting ordered strands and merging them."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # Pull an ordered "strand" out of the input. Popping while enumerating skips
    # some eligible items, but they are simply picked up by a later pass.
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
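Tracing the first assertion: on `[4, 3, 5, 1, 2]` the first pass extracts the increasing strand `[4, 5]` (3, 1 and 2 never exceed the strand's tail), leaving `[3, 1, 2]` and seeding the solution with `[4, 5]`. The second pass extracts `[3]` and merge-inserts it to give `[3, 4, 5]`; the third extracts `[1, 2]` and merges to `[1, 2, 3, 4, 5]`, at which point the input is empty and the recursion stops.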
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase :Any = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Dict = ["""input_features""", """is_longer"""] def __init__( self : Dict , _A : Dict=64 , _A : Optional[Any]=48000 , _A : Optional[int]=480 , _A : Union[str, Any]=10 , _A : str=1024 , _A : str=0.0 , _A : Dict=False , _A : Tuple = 0 , _A : str = 14000 , _A : Union[str, Any] = None , _A : Any = "fusion" , _A : Dict = "repeatpad" , **_A : List[str] , ) -> Any: super().__init__( feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , ) __magic_name__ : int = top_db __magic_name__ : str = truncation __magic_name__ : Optional[int] = padding __magic_name__ : str = fft_window_size __magic_name__ : str = (fft_window_size >> 1) + 1 __magic_name__ : int = hop_length __magic_name__ : int = max_length_s __magic_name__ : Optional[Any] = max_length_s * sampling_rate __magic_name__ : Any = sampling_rate __magic_name__ : List[Any] = frequency_min __magic_name__ : Optional[Any] = frequency_max __magic_name__ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCAmelCase , min_frequency=_lowerCAmelCase , max_frequency=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , norm=_lowerCAmelCase , mel_scale='htk' , ) __magic_name__ : Optional[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCAmelCase , min_frequency=_lowerCAmelCase , max_frequency=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ) def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: __magic_name__ : Union[str, Any] = copy.deepcopy(self.__dict__ ) __magic_name__ : List[str] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __lowerCAmelCase ( self : Any , _A : List[str] , _A : Optional[int] = None ) -> str: __magic_name__ : List[Any] = spectrogram( _lowerCAmelCase , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowerCAmelCase , log_mel='dB' , ) return log_mel_spectrogram.T def __lowerCAmelCase ( self : Optional[Any] , _A : List[Any] , _A : int , _A : List[str] ) -> Tuple: __magic_name__ : int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk __magic_name__ : Optional[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk __magic_name__ : List[str] = [0] # randomly choose index for each part __magic_name__ : Optional[Any] = np.random.choice(ranges[0] ) __magic_name__ : Union[str, Any] = np.random.choice(ranges[1] ) __magic_name__ : Optional[Any] = np.random.choice(ranges[2] ) __magic_name__ : Optional[int] = mel[idx_front : idx_front + chunk_frames, :] __magic_name__ : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :] __magic_name__ : Union[str, Any] = mel[idx_back : idx_back + chunk_frames, :] 
__magic_name__ : Optional[int] = torch.tensor(mel[None, None, :] ) __magic_name__ : Any = torch.nn.functional.interpolate( _lowerCAmelCase , size=[chunk_frames, 64] , mode='bilinear' , align_corners=_lowerCAmelCase ) __magic_name__ : List[Any] = mel_shrink[0][0].numpy() __magic_name__ : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __lowerCAmelCase ( self : Tuple , _A : Union[str, Any] , _A : Any , _A : Any , _A : Tuple ) -> List[str]: if waveform.shape[0] > max_length: if truncation == "rand_trunc": __magic_name__ : Optional[Any] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __magic_name__ : str = len(_lowerCAmelCase ) - max_length __magic_name__ : List[str] = np.random.randint(0 , overflow + 1 ) __magic_name__ : Optional[int] = waveform[idx : idx + max_length] __magic_name__ : Optional[int] = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters_slaney )[None, :] elif truncation == "fusion": __magic_name__ : List[str] = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters ) __magic_name__ : int = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __magic_name__ : Dict = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __magic_name__ : Optional[int] = np.stack([mel, mel, mel, mel] , axis=0 ) __magic_name__ : Dict = False else: __magic_name__ : Dict = self._random_mel_fusion(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) __magic_name__ : int = True else: raise NotImplementedError(F'data_truncating {truncation} not implemented' ) else: __magic_name__ : Optional[int] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __magic_name__ : str = int(max_length / len(_lowerCAmelCase ) ) __magic_name__ : Union[str, Any] = np.stack(np.tile(_lowerCAmelCase , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": __magic_name__ : int = int(max_length / len(_lowerCAmelCase ) ) __magic_name__ : Optional[Any] = np.stack(np.tile(_lowerCAmelCase , _lowerCAmelCase ) ) __magic_name__ : Optional[int] = np.pad(_lowerCAmelCase , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": __magic_name__ : Dict = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters ) __magic_name__ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: __magic_name__ : Any = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : int , _A : List[Any] , _A : str = None , _A : Any = None , _A : List[Any] = None , _A : Dict = None , _A : Tuple = None , **_A : int , ) -> Optional[int]: __magic_name__ : Optional[Any] = truncation if truncation is not None else self.truncation __magic_name__ : Dict = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' F' was sampled with {self.sampling_rate} and not {sampling_rate}.' 
) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) __magic_name__ : Dict = isinstance(_lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) __magic_name__ : Any = is_batched_numpy or ( isinstance(_lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __magic_name__ : Union[str, Any] = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCAmelCase , np.ndarray ): __magic_name__ : Any = np.asarray(_lowerCAmelCase , dtype=np.floataa ) elif isinstance(_lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __magic_name__ : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __magic_name__ : Optional[Any] = [np.asarray(_lowerCAmelCase )] # convert to mel spectrogram, truncate and pad if needed. __magic_name__ : List[Any] = [ self._get_input_mel(_lowerCAmelCase , max_length if max_length else self.nb_max_samples , _lowerCAmelCase , _lowerCAmelCase ) for waveform in raw_speech ] __magic_name__ : Tuple = [] __magic_name__ : Optional[int] = [] for mel, longer in padded_inputs: input_mel.append(_lowerCAmelCase ) is_longer.append(_lowerCAmelCase ) if truncation == "fusion" and sum(_lowerCAmelCase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __magic_name__ : Optional[int] = np.random.randint(0 , len(_lowerCAmelCase ) ) __magic_name__ : List[Any] = True if isinstance(input_mel[0] , _lowerCAmelCase ): __magic_name__ : int = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool __magic_name__ : Tuple = [[longer] for longer in is_longer] __magic_name__ : List[Any] = {'input_features': input_mel, 'is_longer': is_longer} __magic_name__ : Union[str, Any] = BatchFeature(_lowerCAmelCase ) if return_tensors is not None: __magic_name__ : int = input_features.convert_to_tensors(_lowerCAmelCase ) return input_features
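For waveforms shorter than `max_length`, the "repeatpad" branch above tiles the audio and then zero-pads the remainder. A standalone numpy sketch of that padding rule (the values are illustrative):

import numpy as np

waveform = np.arange(5, dtype=np.float32)  # a 5-sample stand-in for real audio
max_length = 12

n_repeat = int(max_length / len(waveform))  # 2 full repeats fit
padded = np.tile(waveform, n_repeat)        # 10 samples
padded = np.pad(padded, (0, max_length - padded.shape[0]), mode="constant", constant_values=0)
assert padded.shape == (12,) and list(padded[-2:]) == [0.0, 0.0]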
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the fetched dataset bunch into features and regression target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    data = fetch_california_housing()
    data_input, data_output = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        data_input, data_output, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor turns the smallest product into the largest
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
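A few checks of the min/max swap: on `[2, 3, -2, 4]` the best product is 6 (from `[2, 3]`), and on `[-2, 0, -1]` it is 0, since the two negatives are separated by a zero.

assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-2, -3, 4]) == 24  # the swap lets -2 * -3 carry through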
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
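Outside the test harness, the warpers compose the same way during sampling: each takes `(input_ids, scores, cur_len)` and returns modified scores. A minimal sketch using the same classes the tests import (random shapes, illustrative values):

import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=5)]
)
input_ids = jnp.zeros((2, 4), dtype=jnp.int32)  # dummy prompt of length 4
scores = jnp.ones((2, 20))                      # uniform logits over a 20-token vocab
warped = processors(input_ids, scores, cur_len=4)
# All but the top 5 entries per row are now filtered to -inf.
assert int(jnp.isinf(warped).sum(axis=-1)[0]) == 15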
def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the length of the smallest repunit divisible by `divisor`.

    Returns 0 when no repunit is divisible, i.e. when gcd(divisor, 10) != 1.
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1  # R(1) mod divisor
    repunit_index = 1
    while repunit:
        # R(k+1) = 10 * R(k) + 1, computed modulo the divisor
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least n for which A(n) exceeds `limit` (Project Euler 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
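For example, A(7) = 6, since 111111 = 7 × 15873 and no shorter repunit is divisible by 7:

assert least_divisible_repunit(7) == 6    # 111111 == 7 * 15873
assert least_divisible_repunit(41) == 5   # 11111 == 41 * 271
assert least_divisible_repunit(10) == 0   # divisor must be coprime to 10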
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase : Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Any = DebertaTokenizer A_ : Optional[int] = True A_ : str = DebertaTokenizerFast def a (self : Optional[int] ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __snake_case = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) __snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __snake_case = {'''unk_token''': '''[UNK]'''} __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_lowerCAmelCase ) ) def a (self : Union[str, Any] , **a__ : int ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def a (self : Tuple , a__ : int ): """simple docstring""" __snake_case = '''lower newer''' __snake_case = '''lower newer''' return input_text, output_text def a (self : Any ): """simple docstring""" __snake_case = self.get_tokenizer() __snake_case = '''lower newer''' __snake_case = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __snake_case = tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) __snake_case = tokens + [tokenizer.unk_token] __snake_case = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase ) def a (self : int ): """simple docstring""" __snake_case = self.get_tokenizer() __snake_case = tokenizer('''Hello''' , '''World''' ) __snake_case = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , _lowerCAmelCase ) @slow def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __snake_case = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowerCAmelCase ) __snake_case = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowerCAmelCase ) __snake_case = tokenizer.encode( '''sequence builders''' , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase ) __snake_case = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase ) __snake_case = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) __snake_case = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def a (self : List[str] ): """simple 
docstring""" __snake_case = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __snake_case = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __snake_case = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __snake_case = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase ) __snake_case = [tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) for seq in encoding['''input_ids''']] # fmt: off __snake_case = { '''input_ids''': [ [1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __snake_case = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , _lowerCAmelCase ) for expected, decoded in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def different_signs(num_a: int, num_b: int) -> bool:
    """Return True iff `num_a` and `num_b` have opposite signs.

    In two's complement, XOR-ing the numbers sets the sign bit of the
    result exactly when the operands' sign bits differ.
    """
    return num_a ^ num_b < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
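A few quick checks of the sign-bit trick:

assert different_signs(1, -1) is True
assert different_signs(-5, -7) is False
assert different_signs(0, 3) is False  # zero is treated as non-negative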
'''simple docstring''' import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging __lowerCAmelCase = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE : """simple docstring""" _a : List[Any] = None @experimental def __UpperCamelCase ( lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple ): """simple docstring""" if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return _map_with_joblib(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def __UpperCamelCase ( lowercase_ : int , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : int , lowercase_ : str , lowercase_ : List[str] ): """simple docstring""" a_ = num_proc if num_proc <= len(__lowerCAmelCase ) else len(__lowerCAmelCase ) a_ = [] # We organize the splits ourselve (contiguous splits) for index in range(__lowerCAmelCase ): a_ = len(__lowerCAmelCase ) // num_proc a_ = len(__lowerCAmelCase ) % num_proc a_ = div * index + min(__lowerCAmelCase , __lowerCAmelCase ) a_ = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(__lowerCAmelCase ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( F'Error dividing inputs iterable among processes. ' F'Total number of objects {len(__lowerCAmelCase )}, ' F'length: {sum(len(i[1] ) for i in split_kwds )}' ) logger.info( F'Spawning {num_proc} processes for {len(__lowerCAmelCase )} objects in slices of {[len(i[1] ) for i in split_kwds]}' ) a_ , a_ = None, None if not disable_tqdm: a_ , a_ = (RLock(),), tqdm.set_lock with Pool(__lowerCAmelCase , initargs=__lowerCAmelCase , initializer=__lowerCAmelCase ) as pool: a_ = pool.map(__lowerCAmelCase , __lowerCAmelCase ) logger.info(F'Finished {num_proc} processes' ) a_ = [obj for proc_res in mapped for obj in proc_res] logger.info(F'Unpacked {len(__lowerCAmelCase )} objects' ) return mapped def __UpperCamelCase ( lowercase_ : str , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Dict , lowercase_ : Optional[int] ): """simple docstring""" import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__lowerCAmelCase ): return joblib.Parallel()( joblib.delayed(__lowerCAmelCase )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def __UpperCamelCase ( lowercase_ : str ): """simple docstring""" a_ = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: a_ = None
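The multiprocessing path above divides the iterable into contiguous slices: each worker gets `len // num_proc` items, and the first `len % num_proc` workers get one extra. A standalone sketch of that index arithmetic (the helper name is illustrative):

def contiguous_splits(items: list, num_proc: int) -> list:
    div, mod = divmod(len(items), num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(items[start:end])
    return splits

assert contiguous_splits(list(range(10)), 3) == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]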
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase__ : Optional[int] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase__ : List[str] = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase__ : Any = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def snake_case__ ( self : str ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def snake_case__ ( self : Any , __lowercase : Optional[int] , __lowercase : Any , __lowercase : List[Any] = 1 , __lowercase : List[Any] = 4 , ): """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCAmelCase , hypotheses=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase ) }
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# NOTE: the assignment target was garbled in the source; TF_CPP_MIN_LOG_LEVEL
# is a reconstruction ("3" silences TensorFlow's C++ logging before the import below).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'

_CITATION = '\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n            and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n            and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n            Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        # sklearn's signature is f1_score(y_true, y_pred, ...), so references come first
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
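# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical example of using the config defined above. It only
# assumes the standard PretrainedConfig API: `attribute_map` resolves the
# generic names to the GPT-2-style fields, and `to_json_string` serializes.
def _demo_gpt_bigcode_config() -> None:
    config = GPTBigCodeConfig(n_layer=24, n_head=16, n_embd=2048)
    assert config.num_hidden_layers == 24  # alias resolved via attribute_map -> n_layer
    assert config.hidden_size == 2048      # alias resolved via attribute_map -> n_embd
    print(config.to_json_string())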
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    # NOTE: the target q/k/v key names below were lost in the corrupted source;
    # they are reconstructed assuming the standard DETR-style conversion layout.
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Change the brightness of a PIL image by the given level."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
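# --- Quick check (not part of the original file) ---
# Since 128 + level + (c - 128) simplifies to c + level, every channel value
# should shift by exactly `level`; PIL clamps results to the 0-255 range.
def _demo_brightness_shift() -> None:
    img = Image.new("L", (2, 2), color=100)  # tiny grayscale test image
    out = change_brightness(img, 50)
    assert list(out.getdata()) == [150] * 4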
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


# NOTE: the dataclass field names below were stripped by the corruption; they are
# recovered from the `pd_read_csv_kwargs` property and `__post_init__` in this file.
@dataclass
class CsvConfig(datasets.BuilderConfig):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # every row and every column has to be sorted in decreasing order
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
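# --- Sanity checks (not part of the original file) ---
# find_negative_index returns the index of the first negative value in a
# descending row, or the row length when the row contains no negatives.
assert find_negative_index([4, 3, 2, -1]) == 3
assert find_negative_index([1, 0, -1, -4]) == 2
assert find_negative_index([7, 7, 6]) == 3
assert find_negative_index([]) == 0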
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning 'Oh I think you're nominated'", said Dappy."And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around."At the end of the day we're grateful to be where we are in our careers."If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" """,
    ]
    expected_text = [
        "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
        " reduce the risk of wildfires.",
        'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
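# --- Illustration (not part of the original file) ---
# Expected layout produced by the two methods above for a sequence pair,
# using hypothetical ids: cls=101, sep=102, sentence A=[7, 8], sentence B=[9]:
#   build_inputs_with_special_tokens     -> [101, 7, 8, 102, 9, 102]
#   create_token_type_ids_from_sequences -> [0, 0, 0, 0, 1, 1]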
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import argparse
import json
from typing import List

from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # meaning they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext
    # (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP
    # (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
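A hypothetical sketch of consuming the script's output (assumes the script has been run with its default `--save_path`): each line of the ref file is a JSON list of positions used for whole-word masking.

# Hypothetical usage sketch (not part of the script above): each line of the
# produced ref file is a JSON list of the input_ids indices that are
# "##"-continuations of a Chinese word segmented by LTP.
import json

with open("./resources/ref.txt", encoding="utf-8") as f:
    refs = [json.loads(line) for line in f]
# refs[i] lists, for sentence i, the subword positions a whole-word-masking
# data collator should mask together with the word's first subword.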
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify inputs
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
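A hypothetical usage sketch for the processor above; it assumes an installed transformers with Tesseract OCR available and a local document.png (both placeholders).

# Hypothetical usage sketch: with apply_ocr=True (the default), the image
# processor extracts words and boxes, which the tokenizer then encodes.
from transformers import LayoutLMv3Processor
from PIL import Image

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")  # placeholder path
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values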
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" 
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
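The conversion script above is driven entirely by its argparse flags; a hypothetical invocation (the script file name and output path are placeholder assumptions, the flags come from the argparse setup above) might look like:

# Hypothetical invocation of the DPT conversion script above:
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-converted \
#       --show_prediction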
'''simple docstring''' import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a__ : Dict = { 'bart': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'bert': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-base-cased-finetuned-mrpc': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'dpr': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'gpt2': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlnet': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm-roberta': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'transfo-xl': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'openai-gpt': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'roberta': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'layoutlm': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'roberta-large-mnli': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'camembert': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'flaubert': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert-base-distilled-squad': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert-visual-feature-encoder': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'ctrl': ( 
CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'albert': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 't5': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'electra': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'wav2vec2': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : Any=True ) -> List[str]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: UpperCAmelCase = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) UpperCAmelCase = config_class.from_json_file(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = True UpperCAmelCase = True print(f"Building TensorFlow model from configuration: {config}" ) UpperCAmelCase = model_class(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): UpperCAmelCase = cached_file( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: UpperCAmelCase = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if compare_with_pt_model: UpperCAmelCase = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE_ ) # build the network UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' ) UpperCAmelCase = pt_model_class.from_pretrained( pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): UpperCAmelCase = pt_model(**pt_model.dummy_inputs ) UpperCAmelCase = pto[0].numpy() UpperCAmelCase = tfo[0].numpy() UpperCAmelCase = np.amax(np.abs(np_pt - np_tf ) ) print(f"Max absolute difference between models outputs {diff}" ) assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}" # Save pytorch-model print(f"Save TensorFlow model to {tf_dump_path}" ) tf_model.save_weights(SCREAMING_SNAKE_CASE_ , save_format='''h5''' ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Tuple: """simple docstring""" if args_model_type is None: UpperCAmelCase = list(MODEL_CLASSES.keys() ) else: UpperCAmelCase = [args_model_type] for j, model_type in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ): print('''=''' * 100 ) print(f" Converting model type {j}/{len(SCREAMING_SNAKE_CASE_ )}: {model_type}" ) print('''=''' * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." 
) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: UpperCAmelCase = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: UpperCAmelCase = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , start=1 ): print('''-''' * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f" Skipping finetuned checkpoint {model_shortcut_name}" ) continue UpperCAmelCase = model_shortcut_name elif only_convert_finetuned_models: print(f" Skipping not finetuned checkpoint {model_shortcut_name}" ) continue print( f" Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE_ )}: {model_shortcut_name} - model_type {model_type}" ) print('''-''' * 100 ) if config_shortcut_name in aws_config_map: UpperCAmelCase = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) else: UpperCAmelCase = config_shortcut_name if model_shortcut_name in aws_model_maps: UpperCAmelCase = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) else: UpperCAmelCase = model_shortcut_name if os.path.isfile(SCREAMING_SNAKE_CASE_ ): UpperCAmelCase = '''converted_model''' convert_pt_checkpoint_to_tf( model_type=SCREAMING_SNAKE_CASE_ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE_ , config_file=SCREAMING_SNAKE_CASE_ , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE_ , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=SCREAMING_SNAKE_CASE_ , ) if remove_cached_files: os.remove(SCREAMING_SNAKE_CASE_ ) os.remove(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": a__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.' ) parser.add_argument( '--model_type', default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ 'convert all the models from AWS.' ), ) parser.add_argument( '--pytorch_checkpoint_path', default=None, type=str, help=( 'Path to the PyTorch checkpoint path or shortcut name to download from AWS. ' 'If not given, will download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--config_file', default=None, type=str, help=( 'The config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture. If not given and ' '--pytorch_checkpoint_path is not given or is a shortcut name ' 'use the configuration associated to the shortcut name on the AWS' ), ) parser.add_argument( '--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.' 
) parser.add_argument( '--use_cached_models', action='store_true', help='Use cached models if possible instead of updating to latest checkpoint versions.', ) parser.add_argument( '--remove_cached_files', action='store_true', help='Remove pytorch models after conversion (save memory when converting in batches).', ) parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.') a__ : Dict = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
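Likewise, a hypothetical invocation of the PyTorch-to-TensorFlow batch converter above (the script file name is an assumption; the flags come from its argparse setup):

# Hypothetical invocation sketch:
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --tf_dump_path ./tf-dumps \
#       --model_type bert \
#       --compare_with_pt_model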
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number factorial(num)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
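Worked example: factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) returns 27 (solution(100) gives the Project Euler 20 answer, 648).

# Sanity check of the digit-sum logic above:
assert sum(int(x) for x in str(3628800)) == 27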
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compares a library version to a requirement using a given operation string."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compares the currently installed PyTorch version to a reference version."""
    return compare_versions(torch_version, operation, version)
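A self-contained sketch of the operator mapping the module imports from .constants; the dictionary contents here are an assumption based on how compare_versions uses it, not the imported definition itself.

# Minimal sketch: STR_OPERATION_TO_FUNC maps comparison strings to operator
# functions, so version guards read like is_torch_version(">=", "1.12.0").
import operator
from packaging.version import parse

STR_OPERATION_TO_FUNC = {
    ">": operator.gt,
    ">=": operator.ge,
    "==": operator.eq,
    "!=": operator.ne,
    "<=": operator.le,
    "<": operator.lt,
}
assert STR_OPERATION_TO_FUNC[">="](parse("1.13.1"), parse("1.12.0"))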
'''simple docstring''' import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =StableUnCLIPPipeline _lowerCamelCase =TEXT_TO_IMAGE_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _lowerCamelCase =False def __snake_case ( self : str ): UpperCAmelCase = 32 UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ ) UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , ) torch.manual_seed(0 ) UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , ) torch.manual_seed(0 ) UpperCAmelCase = AutoencoderKL() UpperCAmelCase = { # prior components '''prior_tokenizer''': prior_tokenizer, 
'''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ): if str(a__ ).startswith('''mps''' ): UpperCAmelCase = torch.manual_seed(a__ ) else: UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ ) UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __snake_case ( self : List[Any] ): UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=a__ ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Optional[int] ): UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCAmelCase = pipe('''anime turle''' , generator=a__ , output_type='''np''' ) UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(a__ , a__ ) def __snake_case ( self : str ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) UpperCAmelCase = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def is_palindrome(head):
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare the two parts
    # the second part has the same number of nodes or one fewer
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
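The functions above assume a singly linked list node with `val` and `next` attributes; the file does not define one, so here is a minimal sketch of such a node plus a usage check (the class and helper are assumptions for illustration).

# Minimal node type and builder assumed by the palindrome checks above:
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


assert is_palindrome_stack(build_list([1, 2, 3, 2, 1])) is True
assert is_palindrome_stack(build_list([1, 2])) is False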
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
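Worked example: 145 is a Krishnamurthy (strong) number because 1! + 4! + 5! = 1 + 24 + 120 = 145, while 10 is not, since 1! + 0! = 2.

# Sanity checks for the predicate above:
assert krishnamurthy(145) is True
assert krishnamurthy(10) is False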
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
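A hypothetical usage sketch for the processor above (assumes an installed transformers and a local cat.png, both placeholders):

# Hypothetical usage: joint text + image preprocessing for CLIP.
from transformers import CLIPProcessor
from PIL import Image

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open("cat.png").convert("RGB")  # placeholder path
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
)
print(inputs.keys())  # input_ids, attention_mask, pixel_values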
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
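A hypothetical training-loop sketch for the decorator exercised above: on every out-of-memory error it halves the batch size and re-invokes the function from scratch, which is exactly what the [128, 64, 32, 16, 8] assertions verify.

# Hypothetical usage of find_executable_batch_size in a real loop:
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # (assumed body) build the dataloader and run an epoch at `batch_size`;
    # an OOM here makes the decorator retry at 64, 32, and so on.
    print(f"Trying batch size {batch_size}")


train()  # called without the batch_size argument; the decorator supplies it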
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ : str = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =XGLMTokenizer _lowerCamelCase =XGLMTokenizerFast _lowerCamelCase =True _lowerCamelCase =True def __snake_case ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self : List[Any] ): UpperCAmelCase = '''<pad>''' UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(len(a__ ) , 1008 ) def __snake_case ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def __snake_case ( self : Optional[Any] ): UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ ) UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual( a__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def __snake_case ( self : Optional[Any] ): return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) def __snake_case ( self : Optional[int] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(a__ , f.name ) UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ ) UpperCAmelCase = pickle.dumps(a__ ) pickle.loads(a__ ) def __snake_case ( self : Tuple ): if not self.test_rust_tokenizer: return UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = '''I was born in 92000, and this is 
falsé.''' UpperCAmelCase = tokenizer.tokenize(a__ ) UpperCAmelCase = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ ) UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = tokenizer.encode(a__ ) UpperCAmelCase = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) @slow def __snake_case ( self : int ): UpperCAmelCase = '''Hello World!''' UpperCAmelCase = [2, 31227, 4447, 35] self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @slow def __snake_case ( self : List[str] ): UpperCAmelCase = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth''' ) # fmt: off UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: on self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @slow def __snake_case ( self : Any ): # fmt: off UpperCAmelCase = { '''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected Unweighted Graph for running the Markov Chain Algorithm
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
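A usage sketch with a hypothetical two-state chain (the transition list is illustrative; the probabilities out of each node sum to 1). The visit counts should approach the stationary distribution, roughly 5:1 in favor of "a" here, though exact values vary per run.

# Hypothetical chain: mostly stays in "a", occasionally hops to "b" and back.
transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
counts = get_transitions("a", transitions, 1000)
print(counts)  # e.g. Counter({'a': ~834, 'b': ~167}); randomness makes this vary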
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a__ : str = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> YolosConfig: """simple docstring""" UpperCAmelCase = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase = 192 UpperCAmelCase = 768 UpperCAmelCase = 12 UpperCAmelCase = 3 UpperCAmelCase = [800, 1_333] UpperCAmelCase = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase = 330 UpperCAmelCase = 14 UpperCAmelCase = 6 UpperCAmelCase = 1_320 elif "yolos_s" in yolos_name: UpperCAmelCase = 384 UpperCAmelCase = 1_536 UpperCAmelCase = 12 UpperCAmelCase = 6 elif "yolos_b" in yolos_name: UpperCAmelCase = [800, 1_344] UpperCAmelCase = 91 UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''coco-detection-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosConfig , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase = in_proj_weight[: config.hidden_size, :] UpperCAmelCase = in_proj_bias[: config.hidden_size] UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" if "backbone" in name: UpperCAmelCase = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" 
in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if "vit.norm" in name: UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosForObjectDetection ) -> dict: """simple docstring""" for key in orig_state_dict.copy().keys(): UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_split[2] ) UpperCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[ dim : dim * 2, : ] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = val return orig_state_dict def __snake_case ( ) -> torch.Tensor: """simple docstring""" UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" UpperCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE_ ) # load original state_dict UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model'''] # load 🤗 model UpperCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512 UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase, UpperCAmelCase = outputs.logits, outputs.pred_boxes UpperCAmelCase, UpperCAmelCase = None, None if yolos_name == "yolos_ti": UpperCAmelCase = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) UpperCAmelCase = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) UpperCAmelCase = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) UpperCAmelCase = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) UpperCAmelCase = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": UpperCAmelCase = torch.tensor( [[-40.6064, 
-24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) UpperCAmelCase = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: UpperCAmelCase = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) UpperCAmelCase = model_mapping[yolos_name] image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) a__ : Optional[Any] = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
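The conversion above turns on one non-obvious step: timm stores attention as a single fused qkv projection, while the Transformers model expects separate query/key/value weights. A minimal sketch of that slicing with toy sizes (random tensors stand in for a real checkpoint):

import torch

hidden_size = 4  # toy size, not a real checkpoint dimension
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # rows stacked as [q; k; v]
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]

query_b = in_proj_bias[:hidden_size]
key_b = in_proj_bias[hidden_size : hidden_size * 2]
value_b = in_proj_bias[-hidden_size:]

# Re-stacking the slices must reproduce the fused tensors exactly.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([query_b, key_b, value_b]), in_proj_bias)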
51
1
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Return how many tile counts t <= t_limit can form between 1 and n_limit
    distinct hollow square laminae.
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
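As a cross-check of the closed-form bounds used above, the same tally can be produced by direct enumeration for a small tile budget. A sketch (the 1000-tile limit is an arbitrary choice for the check; `solution` refers to the function above):

from collections import defaultdict


def solution_brute_force(t_limit: int = 1_000, n_limit: int = 10) -> int:
    # Enumerate every square lamina of at most t_limit tiles directly.
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit // 4 + 2):
        for hole in range(outer - 2, 0, -2):  # same parity as outer
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tile count only grows as the hole shrinks
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


# For small limits this should agree with the bounded version, e.g.
# solution_brute_force(1_000, 10) == solution(1_000, 10)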
51
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType a__ : List[Any] = logging.get_logger(__name__) a__ : int = { 'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json', } # fmt: off a__ : Any = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377, 1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211, 4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786, 11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791, 17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409, 34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361 ] a__ : Tuple = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627, 3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647, 7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793, 14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675, 22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865, 42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362 ] class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase ="whisper" _lowerCamelCase =["past_key_values"] _lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : List[Any] , a__ : Any=51865 , a__ : Any=80 , a__ : Dict=6 , a__ : int=4 , a__ : int=6 , a__ : str=4 , a__ : int=1536 , a__ : Optional[Any]=1536 , a__ : str=0.0 , a__ : Optional[int]=0.0 , a__ : Optional[int]=50257 , a__ : int=True , a__ : Optional[int]=True , a__ : str="gelu" , a__ : List[str]=256 , a__ : Union[str, Any]=0.0 , a__ : List[str]=0.0 , a__ : Any=0.0 , a__ : str=0.02 , a__ : str=False , a__ : int=1500 , a__ : Tuple=448 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Tuple=None , a__ : List[Any]=[220, 50256] , a__ : Optional[int]=False , a__ : Optional[Any]=256 , a__ : Any=False , a__ : int=0.05 , a__ : Optional[Any]=10 , a__ : Dict=2 , a__ : Optional[Any]=0.0 , a__ : Tuple=10 , a__ : Any=0 , a__ : str=7 , **a__ : Any , ): UpperCAmelCase = vocab_size UpperCAmelCase = num_mel_bins UpperCAmelCase = d_model UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = max_source_positions UpperCAmelCase = max_target_positions # Audio Classification-specific parameters. 
Feel free to ignore for other classes. UpperCAmelCase = classifier_proj_size UpperCAmelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase = apply_spec_augment UpperCAmelCase = mask_time_prob UpperCAmelCase = mask_time_length UpperCAmelCase = mask_time_min_masks UpperCAmelCase = mask_feature_prob UpperCAmelCase = mask_feature_length UpperCAmelCase = mask_feature_min_masks UpperCAmelCase = median_filter_width super().__init__( pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , suppress_tokens=a__ , begin_suppress_tokens=a__ , **a__ , ) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' @property def __snake_case ( self : List[str] ): UpperCAmelCase = OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase = {0: '''batch'''} else: UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(a__ , direction='''inputs''' ) return common_inputs def __snake_case ( self : str , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 22050 , a__ : float = 5.0 , a__ : int = 220 , ): UpperCAmelCase = OrderedDict() UpperCAmelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=a__ , framework=a__ , sampling_rate=a__ , time_duration=a__ , frequency=a__ , ) UpperCAmelCase = encoder_inputs['''input_features'''].shape[2] UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length UpperCAmelCase = super().generate_dummy_inputs( preprocessor.tokenizer , a__ , a__ , a__ , a__ ) UpperCAmelCase = encoder_inputs.pop('''input_features''' ) UpperCAmelCase = decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: UpperCAmelCase = decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def __snake_case ( self : Dict ): return 1e-3
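For reference, the attribute_map declared on the config lets generic names resolve to the Whisper-specific fields. A small sketch, assuming a working transformers installation:

from transformers import WhisperConfig

config = WhisperConfig(d_model=256, encoder_layers=4, encoder_attention_heads=4)

# attribute_map makes the generic names resolve to the Whisper-specific fields.
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 4
print(config.model_type)  # "whisper"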
51
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a__ : Union[str, Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _lowerCamelCase =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _lowerCamelCase ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _lowerCamelCase ={ config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def __snake_case ( self : Dict ): UpperCAmelCase = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' ) UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) UpperCAmelCase = text_classifier('''This is great !''' , top_k=2 ) self.assertEqual( nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] ) UpperCAmelCase = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 ) self.assertEqual( nested_simplify(a__ ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}], [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}], ] , ) UpperCAmelCase = text_classifier('''This is great !''' , top_k=1 ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) # Legacy behavior UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=a__ ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=a__ ) self.assertEqual( nested_simplify(a__ ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] ) UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=a__ ) self.assertEqual( nested_simplify(a__ ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}], [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}], ] , ) UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=a__ ) self.assertEqual( nested_simplify(a__ ) , [ {'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_0''', '''score''': 0.504}, ] , ) @require_torch def __snake_case ( self : Union[str, Any] ): import torch UpperCAmelCase = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , ) UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) @require_tf def __snake_case ( self : str ): UpperCAmelCase = pipeline( 
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' ) UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] ) @slow @require_torch def __snake_case ( self : Any ): UpperCAmelCase = pipeline('''text-classification''' ) UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) UpperCAmelCase = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) UpperCAmelCase = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] ) @slow @require_tf def __snake_case ( self : str ): UpperCAmelCase = pipeline('''text-classification''' , framework='''tf''' ) UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) UpperCAmelCase = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) UpperCAmelCase = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] ) def __snake_case ( self : Optional[Any] , a__ : Tuple , a__ : List[Any] , a__ : Any ): UpperCAmelCase = TextClassificationPipeline(model=a__ , tokenizer=a__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def __snake_case ( self : Any , a__ : int , a__ : Tuple ): UpperCAmelCase = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 UpperCAmelCase = '''HuggingFace is in''' UpperCAmelCase = text_classifier(a__ ) self.assertEqual(nested_simplify(a__ ) , [{'''label''': ANY(a__ ), '''score''': ANY(a__ )}] ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) UpperCAmelCase = ['''HuggingFace is in ''', '''Paris is in France'''] UpperCAmelCase = text_classifier(a__ ) self.assertEqual( nested_simplify(a__ ) , [{'''label''': ANY(a__ ), '''score''': ANY(a__ )}, {'''label''': ANY(a__ ), '''score''': ANY(a__ )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format UpperCAmelCase = text_classifier(a__ , top_k=a__ ) UpperCAmelCase = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(a__ ) , [[{'''label''': ANY(a__ ), '''score''': ANY(a__ )}] * N, [{'''label''': ANY(a__ ), '''score''': ANY(a__ )}] * N] , ) UpperCAmelCase = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''} UpperCAmelCase = text_classifier(a__ ) self.assertEqual( nested_simplify(a__ ) , {'''label''': ANY(a__ ), '''score''': ANY(a__ )} , ) self.assertTrue(outputs['''label'''] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
UpperCAmelCase = [['''HuggingFace is in ''', '''Paris is in France''']] with self.assertRaises(a__ ): text_classifier(a__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility UpperCAmelCase = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] ) self.assertEqual( nested_simplify(a__ ) , [{'''label''': ANY(a__ ), '''score''': ANY(a__ )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
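The behaviours exercised by these tests reduce to a small API surface. A usage sketch against the same tiny test checkpoint (requires downloading it from the Hub):

from transformers import pipeline

classifier = pipeline(
    task="text-classification",
    model="hf-internal-testing/tiny-random-distilbert",
    framework="pt",
)

print(classifier("This is great !"))           # top label only
print(classifier("This is great !", top_k=2))  # every label, ranked by score
# Text pairs go through the dict form; a bare nested list is rejected:
print(classifier({"text": "HuggingFace is in ", "text_pair": "Paris is in France"}))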
51
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =LEDConfig _lowerCamelCase ={} _lowerCamelCase ="gelu" def __init__( self : Tuple , a__ : Any , a__ : int=13 , a__ : List[Any]=7 , a__ : int=True , a__ : Union[str, Any]=False , a__ : Tuple=99 , a__ : Any=32 , a__ : List[Any]=2 , a__ : Any=4 , a__ : List[Any]=37 , a__ : List[Any]=0.1 , a__ : Any=0.1 , a__ : Optional[int]=20 , a__ : List[Any]=2 , a__ : Union[str, Any]=1 , a__ : List[Any]=0 , a__ : Union[str, Any]=4 , ): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = eos_token_id UpperCAmelCase = pad_token_id UpperCAmelCase = bos_token_id UpperCAmelCase = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after UpperCAmelCase = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests UpperCAmelCase = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __snake_case ( self : Optional[int] ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ ) UpperCAmelCase = tf.concat( [tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , ) UpperCAmelCase = global_attention_mask return config, inputs_dict def __snake_case ( self : Optional[int] , a__ : List[str] , a__ : int ): UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder() UpperCAmelCase = inputs_dict['''input_ids'''] UpperCAmelCase = input_ids[:1, :] UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase = 1 # first forward pass UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ ) UpperCAmelCase, UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase = model(a__ , attention_mask=a__ )[0] UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(a__ , a__ , rtol=1e-3 ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=None , ) -> Dict: """simple docstring""" if attention_mask is None: UpperCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase = tf.ones((config.encoder_layers, 
config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowerCamelCase =(TFLEDForConditionalGeneration,) if is_tf_available() else () _lowerCamelCase =( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowerCamelCase =True _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False def __snake_case ( self : Optional[Any] ): UpperCAmelCase = TFLEDModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=a__ ) def __snake_case ( self : int ): self.config_tester.run_common_tests() def __snake_case ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*a__ ) def __snake_case ( self : Optional[int] ): UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] ) UpperCAmelCase = 2 UpperCAmelCase = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) UpperCAmelCase = True UpperCAmelCase = self.model_tester.seq_length UpperCAmelCase = self.model_tester.encoder_seq_length def check_decoder_attentions_output(a__ : Tuple ): UpperCAmelCase = outputs.decoder_attentions self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(a__ : int ): UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions] UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) UpperCAmelCase = len(a__ ) self.assertEqual(config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) if self.is_encoder_decoder: UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) self.assertEqual(config.output_hidden_states , a__ ) check_decoder_attentions_output(a__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase = True UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , 
a__ ) ) self.assertEqual(config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) # Check attention is always last and order is fine UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) ) self.assertEqual(model.config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def __snake_case ( self : Any ): pass def __snake_case ( self : Union[str, Any] ): # TODO: Head-masking not yet implement pass def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple: """simple docstring""" return tf.constant(SCREAMING_SNAKE_CASE_ , dtype=tf.intaa ) a__ : int = 1e-4 @slow @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : str ): UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ ) UpperCAmelCase = model(**a__ )[0] UpperCAmelCase = (1, 1024, 768) self.assertEqual(output.shape , a__ ) # change to expected output here UpperCAmelCase = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 ) def __snake_case ( self : str ): UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ ) UpperCAmelCase = model(**a__ )[0] UpperCAmelCase = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , a__ ) # change to expected output here UpperCAmelCase = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
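The prepare_led_inputs_dict helper derives attention masks from the pad token with a single not_equal/cast. The same pattern in isolation, with toy ids and an assumed pad_token_id of 1:

import tensorflow as tf

pad_token_id = 1  # assumed value for this sketch
input_ids = tf.constant([[5, 6, 7, 1, 1]])

# 1 where there is a real token, 0 where there is padding.
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 0 0]]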
51
1
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, input the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
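The windowing loops above can also be written without explicit Python loops. A sketch using NumPy's sliding_window_view (available in NumPy >= 1.20) on a toy series:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

series = np.arange(20, dtype=np.float32).reshape(-1, 1)
look_back, forward_days = 10, 5

# One window per sample, covering the input span and the forecast span.
windows = sliding_window_view(series[:, 0], look_back + forward_days)
x = windows[:, :look_back, None]  # (num_samples, look_back, 1)
y = windows[:, look_back:]        # (num_samples, forward_days)
print(x.shape, y.shape)           # (6, 10, 1) (6, 5)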
51
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = ['XLNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[Any] = ['XLNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = [ 'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLNetForMultipleChoice', 'XLNetForQuestionAnswering', 'XLNetForQuestionAnsweringSimple', 'XLNetForSequenceClassification', 'XLNetForTokenClassification', 'XLNetLMHeadModel', 'XLNetModel', 'XLNetPreTrainedModel', 'load_tf_weights_in_xlnet', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ 'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLNetForMultipleChoice', 'TFXLNetForQuestionAnsweringSimple', 'TFXLNetForSequenceClassification', 'TFXLNetForTokenClassification', 'TFXLNetLMHeadModel', 'TFXLNetMainLayer', 'TFXLNetModel', 'TFXLNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
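The _LazyModule wiring above defers heavy imports until an attribute is first accessed. A minimal stand-alone sketch of the same idea via PEP 562 module-level __getattr__, with a hypothetical two-module package layout:

# lazy_pkg/__init__.py — minimal sketch of lazy attribute loading (hypothetical package).
import importlib

_import_structure = {
    "config": ["MyConfig"],
    "model": ["MyModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Import the submodule only when one of its attributes is first requested.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)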
51
1
import argparse
import json

from tqdm import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
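The script writes one question per line to the evaluation set and the tab-joined gold titles on the matching line of the gold file. A toy record makes the format concrete (the data here is invented, purely illustrative):

import json

dpr_record = json.loads(
    '{"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}]}'
)

eval_line = dpr_record["question"] + "\n"
gold_line = "\t".join(context["title"] for context in dpr_record["positive_ctxs"]) + "\n"
print(repr(eval_line))  # 'who wrote hamlet\n'
print(repr(gold_line))  # 'Hamlet\tWilliam Shakespeare\n'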
51
'''simple docstring''' import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json a__ : List[Any] = 'sshleifer/mar_enro_6_3_student' class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' def __snake_case ( self : Dict ): super().setUp() UpperCAmelCase = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=a__ , ) UpperCAmelCase = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k" @slow @require_torch_gpu def __snake_case ( self : Optional[int] ): MarianMTModel.from_pretrained(a__ ) @slow @require_torch_gpu def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script UpperCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase = bash_script.replace(a__ , str(a__ ) ) UpperCAmelCase = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") UpperCAmelCase = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split() # XXX: args.gpus > 1 : handle multi_gpu in the future UpperCAmelCase = ['''finetune.py'''] + bash_script.split() + args with patch.object(a__ , '''argv''' , a__ ): UpperCAmelCase = argparse.ArgumentParser() UpperCAmelCase = pl.Trainer.add_argparse_args(a__ ) UpperCAmelCase = SummarizationModule.add_model_specific_args(a__ , os.getcwd() ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = main(a__ ) # Check metrics UpperCAmelCase = load_json(model.metrics_save_path ) UpperCAmelCase = metrics['''val'''][0] UpperCAmelCase = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase = os.path.join(args.output_dir , a__ ) UpperCAmelCase = torch.load(a__ , map_location='''cpu''' ) UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase = {os.path.basename(a__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def __snake_case ( self : Any ): UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro" UpperCAmelCase = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 128, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script UpperCAmelCase = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase = bash_script.replace(a__ , str(a__ ) ) UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' ) UpperCAmelCase = 6 UpperCAmelCase = ( ['''distillation.py'''] + bash_script.split() + [ f"--output_dir={output_dir}", '''--gpus=1''', '''--learning_rate=1e-3''', f"--num_train_epochs={epochs}", '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(a__ , '''argv''' , a__ ): UpperCAmelCase = argparse.ArgumentParser() UpperCAmelCase = pl.Trainer.add_argparse_args(a__ ) UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() ) UpperCAmelCase = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu UpperCAmelCase = distill_main(a__ ) # Check metrics UpperCAmelCase = load_json(model.metrics_save_path ) UpperCAmelCase = metrics['''val'''][0] UpperCAmelCase = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase = os.path.join(args.output_dir , a__ ) UpperCAmelCase = torch.load(a__ , map_location='''cpu''' ) UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase = {os.path.basename(a__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
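Both tests drive argparse-based entry points by patching sys.argv rather than spawning subprocesses. The trick in isolation, with a toy parser standing in for the training scripts:

import argparse
import sys
from unittest.mock import patch


def cli_main() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float)
    return parser.parse_args()  # reads sys.argv[1:]


testargs = ["prog.py", "--learning_rate", "1e-3"]
with patch.object(sys, "argv", testargs):
    args = cli_main()
assert args.learning_rate == 1e-3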
51
1
import os


def solution() -> int:
    """
    Sum the name scores in p022_names.txt: each name's alphabetical value
    (A=1, ..., Z=26) multiplied by its 1-indexed position in the sorted list.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        name_score = 0
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score

    return total_score


if __name__ == "__main__":
    print(solution())
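A worked check of the scoring rule, using the example from the original problem statement: COLIN has letter values 3 + 15 + 12 + 9 + 14 = 53, so at alphabetical position 938 it scores 938 x 53 = 49714.

name = "COLIN"
name_score = sum(ord(letter) - 64 for letter in name)
assert name_score == 53
assert 938 * name_score == 49_714  # COLIN at (1-indexed) position 938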
51
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["image_processor", "tokenizer"] _lowerCamelCase ="CLIPImageProcessor" _lowerCamelCase =("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__( self : Union[str, Any] , a__ : List[str]=None , a__ : Union[str, Any]=None , **a__ : Optional[Any] ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__( self : Any , a__ : Any=None , a__ : str=None , a__ : List[Any]=None , **a__ : List[str] ): if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def __snake_case ( self : Optional[Any] , *a__ : int , **a__ : List[Any] ): return self.tokenizer.batch_decode(*a__ , **a__ ) def __snake_case ( self : Any , *a__ : Union[str, Any] , **a__ : Any ): return self.tokenizer.decode(*a__ , **a__ ) @property def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
51
1
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model.\n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
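The converted file is a plain state dict, so it can be loaded back for inspection without instantiating the model. A sketch (the path is a placeholder for whatever --pytorch_dump_path was used):

import torch

state_dict = torch.load("pytorch_model.bin", map_location="cpu")  # placeholder path
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape))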
51
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
51
1
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: """simple docstring""" UpperCAmelCase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg''' UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert('''RGB''' ) UpperCAmelCase = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ), ] ) UpperCAmelCase = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ ) return image def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict: """simple docstring""" if "visual_encoder" in key: UpperCAmelCase = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , SCREAMING_SNAKE_CASE_ ) if "blocks" in key: UpperCAmelCase = re.sub(R'''blocks''' , '''layers''' , SCREAMING_SNAKE_CASE_ ) if "attn" in key: UpperCAmelCase = re.sub(R'''attn''' , '''self_attn''' , SCREAMING_SNAKE_CASE_ ) if "norm1" in key: UpperCAmelCase = re.sub(R'''norm1''' , '''layer_norm1''' , SCREAMING_SNAKE_CASE_ ) if "norm2" in key: UpperCAmelCase = re.sub(R'''norm2''' , '''layer_norm2''' , SCREAMING_SNAKE_CASE_ ) if "encoder.norm" in key: UpperCAmelCase = re.sub(R'''encoder.norm''' , '''post_layernorm''' , SCREAMING_SNAKE_CASE_ ) if "encoder.patch_embed.proj" in key: UpperCAmelCase = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , SCREAMING_SNAKE_CASE_ ) if "encoder.pos_embed" in key: UpperCAmelCase = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , SCREAMING_SNAKE_CASE_ ) if "encoder.cls_token" in key: UpperCAmelCase = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , SCREAMING_SNAKE_CASE_ ) if "self_attn" in key: UpperCAmelCase = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , SCREAMING_SNAKE_CASE_ ) return key @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=None ) -> int: """simple docstring""" if config_path is not None: UpperCAmelCase = BlipConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) else: UpperCAmelCase = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) UpperCAmelCase = BlipForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval() UpperCAmelCase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth''' UpperCAmelCase = blip_decoder(pretrained=SCREAMING_SNAKE_CASE_ , image_size=384 , vit='''base''' ) UpperCAmelCase = pt_model.eval() UpperCAmelCase = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = value hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = 384 UpperCAmelCase = load_demo_image(image_size=SCREAMING_SNAKE_CASE_ , device='''cpu''' ) 
UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) UpperCAmelCase = tokenizer(['''a picture of'''] ).input_ids UpperCAmelCase = hf_model.generate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] UpperCAmelCase = hf_model.generate(SCREAMING_SNAKE_CASE_ ) assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCAmelCase = ( '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth''' ) UpperCAmelCase = blip_vqa(pretrained=SCREAMING_SNAKE_CASE_ , image_size=SCREAMING_SNAKE_CASE_ , vit='''base''' ) vqa_model.eval() UpperCAmelCase = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = value UpperCAmelCase = BlipForQuestionAnswering(SCREAMING_SNAKE_CASE_ ) hf_vqa_model.load_state_dict(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = ['''How many dogs are in this image?'''] UpperCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).input_ids UpperCAmelCase = hf_vqa_model.generate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' ) UpperCAmelCase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth''' UpperCAmelCase = blip_itm(pretrained=SCREAMING_SNAKE_CASE_ , image_size=SCREAMING_SNAKE_CASE_ , vit='''base''' ) itm_model.eval() UpperCAmelCase = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = value UpperCAmelCase = BlipForImageTextRetrieval(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = ['''A picture of a woman with a dog sitting in a beach'''] UpperCAmelCase = tokenizer( SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE_ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(SCREAMING_SNAKE_CASE_ ) hf_itm_model.eval() UpperCAmelCase = hf_itm_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , use_itm_head=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = hf_itm_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , use_itm_head=SCREAMING_SNAKE_CASE_ ) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') a__ : int = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
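The rename_key logic above is just ordered re.sub passes over state-dict keys; because the rules run in sequence, their order matters. The mechanism in isolation with a trimmed-down rule list:

import re

RENAME_RULES = [
    (r"blocks", "layers"),
    (r"attn", "self_attn"),
    (r"norm1", "layer_norm1"),
]


def rename(key: str) -> str:
    # Apply every substitution in order; later rules see earlier rewrites.
    for pattern, replacement in RENAME_RULES:
        key = re.sub(pattern, replacement, key)
    return key


print(rename("visual_encoder.blocks.0.attn.qkv.weight"))
# visual_encoder.layers.0.self_attn.qkv.weight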
51
'''simple docstring''' import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCAmelCase__ : '''simple docstring''' @staticmethod def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ): pass def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> str: """simple docstring""" UpperCAmelCase = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> Dict: """simple docstring""" UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = npimg.shape return {"hash": hashimage(SCREAMING_SNAKE_CASE_ ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase =dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) _lowerCamelCase =dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def __snake_case ( self : Union[str, Any] , a__ : Optional[int] , a__ : Dict , a__ : int ): UpperCAmelCase = MaskGenerationPipeline(model=a__ , image_processor=a__ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def __snake_case ( self : int , a__ : Dict , a__ : Tuple ): pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def __snake_case ( self : str ): pass @slow @require_torch def __snake_case ( self : Optional[Any] ): UpperCAmelCase = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) UpperCAmelCase = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing UpperCAmelCase = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 
0.9_599}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871} ] , ) # fmt: on @require_torch @slow def __snake_case ( self : Dict ): UpperCAmelCase = '''facebook/sam-vit-huge''' UpperCAmelCase = pipeline('''mask-generation''' , model=a__ ) UpperCAmelCase = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing UpperCAmelCase = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053}, ] , )
51
1
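# A minimal, self-contained sketch of the image-hashing helpers the
# mask-generation test above relies on: each mask is summarized as a
# 10-character md5 digest plus its shape so large arrays compare compactly.
# Names (hash_image, mask_to_readable) are illustrative, not the test's own.
import hashlib

import numpy as np


def hash_image(image) -> str:
    # md5 over the raw pixel bytes, truncated to 10 hex characters
    return hashlib.md5(image.tobytes()).hexdigest()[:10]


def mask_to_readable(image) -> dict:
    npimg = np.asarray(image)
    return {"hash": hash_image(npimg), "shape": npimg.shape}


# Identical arrays hash identically, so the dict is a stable summary.
demo = np.zeros((480, 640), dtype=np.uint8)
assert mask_to_readable(demo) == mask_to_readable(demo.copy())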
'''simple docstring''' def __snake_case ( ) -> list[list[int]]: """simple docstring""" return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )] a__ : str = generate_large_matrix() a__ : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __snake_case ( SCREAMING_SNAKE_CASE_ : list[list[int]] ) -> None: """simple docstring""" assert all(row == sorted(SCREAMING_SNAKE_CASE_ , reverse=SCREAMING_SNAKE_CASE_ ) for row in grid ) assert all(list(SCREAMING_SNAKE_CASE_ ) == sorted(SCREAMING_SNAKE_CASE_ , reverse=SCREAMING_SNAKE_CASE_ ) for col in zip(*SCREAMING_SNAKE_CASE_ ) ) def __snake_case ( SCREAMING_SNAKE_CASE_ : list[int] ) -> int: """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: UpperCAmelCase = (left + right) // 2 UpperCAmelCase = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: UpperCAmelCase = mid + 1 else: UpperCAmelCase = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(SCREAMING_SNAKE_CASE_ ) def __snake_case ( SCREAMING_SNAKE_CASE_ : list[list[int]] ) -> int: """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = len(grid[0] ) for i in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCAmelCase = find_negative_index(grid[i][:bound] ) total += bound return (len(SCREAMING_SNAKE_CASE_ ) * len(grid[0] )) - total def __snake_case ( SCREAMING_SNAKE_CASE_ : list[list[int]] ) -> int: """simple docstring""" return len([number for row in grid for number in row if number < 0] ) def __snake_case ( SCREAMING_SNAKE_CASE_ : list[list[int]] ) -> int: """simple docstring""" UpperCAmelCase = 0 for row in grid: for i, number in enumerate(SCREAMING_SNAKE_CASE_ ): if number < 0: total += len(SCREAMING_SNAKE_CASE_ ) - i break return total def __snake_case ( ) -> None: """simple docstring""" from timeit import timeit print('''Running benchmarks''' ) UpperCAmelCase = ( '''from __main__ import count_negatives_binary_search, ''' '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid''' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): UpperCAmelCase = timeit(f"{func}(grid=grid)" , setup=SCREAMING_SNAKE_CASE_ , number=500 ) print(f"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
51
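# A usage sketch for the module above: with every row sorted in decreasing
# order, the first negative entry of each row can be located by binary
# search (O(log n) per row) instead of a linear scan. This variant uses
# bisect on the sign-flipped row and assumes integer entries.
from bisect import bisect_left


def count_negatives(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        # sign-flip makes the row ascending; the first value >= 1 marks the
        # first negative in the original row
        first_negative = bisect_left([-x for x in row], 1)
        total += len(row) - first_negative
    return total


assert count_negatives([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8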
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL a__ : Tuple = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(SCREAMING_SNAKE_CASE_ ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["pixel_values"] def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ): super().__init__(**a__ ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = offset UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ): UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) if "shortest_edge" in size: UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ ) elif "height" in size and "width" in size: UpperCAmelCase = (size['''height'''], size['''width''']) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" ) return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): UpperCAmelCase = get_size_dict(a__ ) if "height" not in size or "width" not in size: raise ValueError(f"Size must have 'height' and 'width' as keys. 
Got {size.keys()}" ) return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ ) def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ): UpperCAmelCase = image.astype(np.floataa ) if offset: UpperCAmelCase = image - (scale / 2) return rescale(a__ , scale=a__ , data_format=a__ , **a__ ) def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase = to_numpy_array(a__ ) if do_resize: UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ ) if do_center_crop: UpperCAmelCase = self.center_crop(a__ , size=a__ ) if do_rescale: UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ ) if do_normalize: UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ ) UpperCAmelCase = to_channel_dimension_format(a__ , a__ ) return image def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = offset if offset is not None else self.offset UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) if not valid_images(a__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase = make_batched(a__ ) UpperCAmelCase = [ [ self._preprocess_image( image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , ) for img in video ] for video in videos ] UpperCAmelCase = {'''pixel_values''': videos} return BatchFeature(data=a__ , tensor_type=a__ )
51
1
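# A plain-Python sketch of the make_batched normalization in the video
# processor above: a single frame becomes [[frame]], one video (a list of
# frames) becomes [video], and a batch of videos passes through unchanged.
# The is_frame check is a stand-in for the library's is_valid_image.
import numpy as np


def make_batched_sketch(videos):
    def is_frame(x):
        return isinstance(x, np.ndarray)

    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_frame(videos[0][0]):
        return videos          # already a batch of videos
    if isinstance(videos, (list, tuple)) and is_frame(videos[0]):
        return [videos]        # a single video -> batch of one
    if is_frame(videos):
        return [[videos]]      # a single frame -> one one-frame video
    raise ValueError(f"Could not make batched video from {videos}")


frame = np.zeros((224, 224, 3), dtype=np.uint8)
assert len(make_batched_sketch(frame)) == 1 and len(make_batched_sketch(frame)[0]) == 1
batched = make_batched_sketch([frame, frame])
assert len(batched) == 1 and len(batched[0]) == 2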
'''simple docstring'''


class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        # inclusive range sum in O(1) from the precomputed prefix array
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        # classic subarray-sum check: prefix[j] - prefix[i] == target
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
51
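# Usage sketch for the prefix-sum class above: range sums in O(1) after an
# O(n) build, and the subarray-sum check via the identity
# prefix[j] - prefix[i] == target.
prefix = [0]
for value in [1, 2, 3, 4]:
    prefix.append(prefix[-1] + value)

# sum of the slice covering values 2 and 3, from two prefix entries
assert prefix[3] - prefix[1] == 5

# does any contiguous subarray sum to 7? (yes: 3 + 4)
seen = set()
found = False
for p in prefix:
    if p - 7 in seen:
        found = True
    seen.add(p)
assert found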
'''simple docstring''' import torch from transformers import AutoModel class lowerCAmelCase__ ( torch.nn.Module ): '''simple docstring''' def __init__( self : Tuple , a__ : List[str]="sayef/fsner-bert-base-uncased" ): super(a__ , self ).__init__() UpperCAmelCase = AutoModel.from_pretrained(a__ , return_dict=a__ ) UpperCAmelCase = torch.nn.CosineSimilarity(3 , 1e-0_8 ) UpperCAmelCase = torch.nn.Softmax(dim=1 ) def __snake_case ( self : List[Any] , **a__ : Tuple ): return self.bert(**a__ ).last_hidden_state def __snake_case ( self : int , a__ : List[str] ): return token_embeddings.sum(2 , keepdim=a__ ) def __snake_case ( self : str , a__ : str , a__ : str , a__ : int=1 ): return self.softmax(T * self.cos(a__ , a__ ) ) def __snake_case ( self : Tuple , a__ : Tuple , a__ : str ): UpperCAmelCase = W_supports['''sizes'''].tolist() UpperCAmelCase = W_supports['''start_token_id'''].item() UpperCAmelCase = W_supports['''end_token_id'''].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] UpperCAmelCase = self.BERT(**a__ ) UpperCAmelCase = self.BERT(**a__ ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = W_supports['''input_ids'''] == start_token_id UpperCAmelCase = W_supports['''input_ids'''] == end_token_id for i, size in enumerate(a__ ): if i == 0: UpperCAmelCase = 0 else: UpperCAmelCase = support_sizes[i - 1] UpperCAmelCase = S[s : s + size][start_token_masks[s : s + size]] UpperCAmelCase = S[s : s + size][end_token_masks[s : s + size]] UpperCAmelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) UpperCAmelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: UpperCAmelCase = torch.vstack((p_starts, p_start) ) UpperCAmelCase = torch.vstack((p_ends, p_end) ) else: UpperCAmelCase = p_start UpperCAmelCase = p_end return p_starts, p_ends
51
1
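# A shape-level sketch (illustrative dimensions, not the exact FSNER wiring)
# of the scoring head above: temperature-scaled cosine similarity over the
# hidden dimension, followed by a softmax over the token axis.
import torch

cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-8)
softmax = torch.nn.Softmax(dim=-1)

query = torch.randn(1, 4, 16)    # (batch, tokens, hidden) -- assumed shapes
support = torch.randn(1, 4, 16)
T = 1.0                          # similarity temperature
scores = softmax(T * cos(query, support))
assert scores.shape == (1, 4)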
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =(UnCLIPScheduler,) def __snake_case ( self : Tuple , **a__ : Dict ): UpperCAmelCase = { '''num_train_timesteps''': 1000, '''variance_type''': '''fixed_small_log''', '''clip_sample''': True, '''clip_sample_range''': 1.0, '''prediction_type''': '''epsilon''', } config.update(**a__ ) return config def __snake_case ( self : Dict ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=a__ ) def __snake_case ( self : Optional[Any] ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=a__ ) def __snake_case ( self : Dict ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=a__ ) def __snake_case ( self : str ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=a__ ) def __snake_case ( self : Optional[int] ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=a__ ) def __snake_case ( self : Any ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=a__ , prev_timestep=a__ ) def __snake_case ( self : Any ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(variance_type='''fixed_small_log''' ) UpperCAmelCase = scheduler_class(**a__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def __snake_case ( self : List[Any] ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(variance_type='''learned_range''' ) UpperCAmelCase = scheduler_class(**a__ ) UpperCAmelCase = 0.5 assert scheduler._get_variance(1 , predicted_variance=a__ ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=a__ ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=a__ ) - -0.0_010_011 < 1e-5 def __snake_case ( self : str ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**a__ ) UpperCAmelCase = scheduler.timesteps UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter UpperCAmelCase = torch.manual_seed(0 ) for i, t in enumerate(a__ ): # 1. predict noise residual UpperCAmelCase = model(a__ , a__ ) # 2. predict previous mean of sample x_t-1 UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ).prev_sample UpperCAmelCase = pred_prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**a__ ) scheduler.set_timesteps(25 ) UpperCAmelCase = scheduler.timesteps UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter UpperCAmelCase = torch.manual_seed(0 ) for i, t in enumerate(a__ ): # 1. 
predict noise residual UpperCAmelCase = model(a__ , a__ ) if i + 1 == timesteps.shape[0]: UpperCAmelCase = None else: UpperCAmelCase = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 UpperCAmelCase = scheduler.step( a__ , a__ , a__ , prev_timestep=a__ , generator=a__ ).prev_sample UpperCAmelCase = pred_prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def __snake_case ( self : int ): pass def __snake_case ( self : List[Any] ): pass
51
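# The skeleton of the full-loop tests above with the UNet stubbed out by
# random noise: build the scheduler from the same config the tests use, set
# 25 inference steps, and pass prev_timestep explicitly as the second loop
# test does. Requires `diffusers`; the tensor shape is illustrative.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(
    num_train_timesteps=1000,
    variance_type="fixed_small_log",
    clip_sample=True,
    clip_sample_range=1.0,
    prediction_type="epsilon",
)
scheduler.set_timesteps(25)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32)
timesteps = scheduler.timesteps
for i, t in enumerate(timesteps):
    prev_t = timesteps[i + 1] if i + 1 < len(timesteps) else None
    noise_pred = torch.randn_like(sample)  # stand-in for the denoising model
    sample = scheduler.step(
        noise_pred, t, sample, prev_timestep=prev_t, generator=generator
    ).prev_sample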
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =(EulerDiscreteScheduler,) _lowerCamelCase =10 def __snake_case ( self : str , **a__ : Tuple ): UpperCAmelCase = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**a__ ) return config def __snake_case ( self : Dict ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=a__ ) def __snake_case ( self : Optional[int] ): for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=a__ , beta_end=a__ ) def __snake_case ( self : List[str] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=a__ ) def __snake_case ( self : int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**a__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase = sample.to(a__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase = scheduler.scale_model_input(a__ , a__ ) UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ) UpperCAmelCase = output.prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 10.0_807 ) < 1e-2 assert abs(result_mean.item() - 0.0_131 ) < 1e-3 def __snake_case ( self : str ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCAmelCase = scheduler_class(**a__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase = sample.to(a__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase = scheduler.scale_model_input(a__ , a__ ) UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ) UpperCAmelCase = output.prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 0.0_002 ) < 1e-2 assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3 def __snake_case ( self : Optional[int] ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**a__ ) scheduler.set_timesteps(self.num_inference_steps , device=a__ ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCAmelCase = sample.to(a__ ) for t in scheduler.timesteps: UpperCAmelCase = scheduler.scale_model_input(a__ , a__ ) UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ) UpperCAmelCase = output.prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 10.0_807 ) < 
1e-2 assert abs(result_mean.item() - 0.0_131 ) < 1e-3 def __snake_case ( self : str ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**a__ , use_karras_sigmas=a__ ) scheduler.set_timesteps(self.num_inference_steps , device=a__ ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCAmelCase = sample.to(a__ ) for t in scheduler.timesteps: UpperCAmelCase = scheduler.scale_model_input(a__ , a__ ) UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ) UpperCAmelCase = output.prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
51
1
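# The canonical denoising loop these scheduler tests exercise: scale the
# sample for the current sigma, predict noise, then step. Requires
# `diffusers`; the model is stubbed with random noise and shapes are
# illustrative.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.randn_like(model_input)  # stand-in for the UNet
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample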
'''simple docstring''' import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process a__ : Union[str, Any] = logging.getLogger(__name__) a__ : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) a__ : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." ) } , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCAmelCase_ )} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "The input training data file (a text file)."} ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={ "help": ( "The input training data files (multiple files in glob format). " "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} ) _lowerCamelCase =field(default=UpperCAmelCase_ , metadata={"help": "Whether ot not to use whole word mask."} ) _lowerCamelCase =field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) _lowerCamelCase =field( default=1 / 6 , metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." ) } , ) _lowerCamelCase =field( default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} ) _lowerCamelCase =field( default=-1 , metadata={ "help": ( "Optional input sequence length after tokenization." 
"The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." ) } , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __snake_case ( SCREAMING_SNAKE_CASE_ : DataTrainingArguments , SCREAMING_SNAKE_CASE_ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[str] = None , ) -> Any: """simple docstring""" def _dataset(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any]=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' ) return LineByLineWithRefDataset( tokenizer=SCREAMING_SNAKE_CASE_ , file_path=SCREAMING_SNAKE_CASE_ , block_size=args.block_size , ref_path=SCREAMING_SNAKE_CASE_ , ) return LineByLineTextDataset(tokenizer=SCREAMING_SNAKE_CASE_ , file_path=SCREAMING_SNAKE_CASE_ , block_size=args.block_size ) else: return TextDataset( tokenizer=SCREAMING_SNAKE_CASE_ , file_path=SCREAMING_SNAKE_CASE_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=SCREAMING_SNAKE_CASE_ , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(SCREAMING_SNAKE_CASE_ ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def __snake_case ( ) -> Tuple: """simple docstring""" UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ''' '''or remove the --do_eval argument.''' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.tokenizer_name: UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another''' ''' script, save it,and load it from here, using --tokenizer_name''' ) if model_args.model_name_or_path: UpperCAmelCase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , ) else: logger.info('''Training new model from scratch''' ) UpperCAmelCase = AutoModelWithLMHead.from_config(SCREAMING_SNAKE_CASE_ ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the''' '''--mlm flag (masked language modeling).''' ) if data_args.block_size <= 0: UpperCAmelCase = tokenizer.max_len # Our input block size will be the max possible for the model else: UpperCAmelCase = min(data_args.block_size , tokenizer.max_len ) # Get datasets UpperCAmelCase = ( get_dataset(SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) UpperCAmelCase = ( get_dataset(SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , evaluate=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": UpperCAmelCase = DataCollatorForPermutationLanguageModeling( tokenizer=SCREAMING_SNAKE_CASE_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: UpperCAmelCase = DataCollatorForWholeWordMask( tokenizer=SCREAMING_SNAKE_CASE_ , mlm_probability=data_args.mlm_probability ) else: UpperCAmelCase = DataCollatorForLanguageModeling( tokenizer=SCREAMING_SNAKE_CASE_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer UpperCAmelCase = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , prediction_loss_only=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: UpperCAmelCase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=SCREAMING_SNAKE_CASE_ ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase = {} if training_args.do_eval: 
logger.info('''*** Evaluate ***''' ) UpperCAmelCase = trainer.evaluate() UpperCAmelCase = math.exp(eval_output['''eval_loss'''] ) UpperCAmelCase = {'''perplexity''': perplexity} UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' ) if trainer.is_world_master(): with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE_ , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) results.update(SCREAMING_SNAKE_CASE_ ) return results def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]: """simple docstring""" main() if __name__ == "__main__": main()
51
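# The collator-selection branch of the training script above, reduced to a
# decision table. The strings name real transformers collators; the helper
# itself is illustrative.
def pick_collator(model_type: str, mlm: bool, whole_word_mask: bool) -> str:
    if model_type == "xlnet":
        return "DataCollatorForPermutationLanguageModeling"
    if mlm and whole_word_mask:
        return "DataCollatorForWholeWordMask"
    return "DataCollatorForLanguageModeling"


assert pick_collator("xlnet", mlm=False, whole_word_mask=False) == "DataCollatorForPermutationLanguageModeling"
assert pick_collator("bert", mlm=True, whole_word_mask=True) == "DataCollatorForWholeWordMask"
assert pick_collator("gpt2", mlm=False, whole_word_mask=False) == "DataCollatorForLanguageModeling"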
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : int=None ) -> Any: """simple docstring""" return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ ) @dataclass class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =field( metadata={"help": "The csv file to plot."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Disable logarithmic scale when plotting"} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={ "help": "Whether the csv file has training results or inference results. Defaults to inference results." } , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , ) _lowerCamelCase =list_field( default=UpperCAmelCase_ , metadata={"help": "List of model names that are used instead of the ones in the csv file."} ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: """simple docstring""" try: int(SCREAMING_SNAKE_CASE_ ) return True except ValueError: return False def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> str: """simple docstring""" try: float(SCREAMING_SNAKE_CASE_ ) return True except ValueError: return False class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Dict , a__ : Optional[int] ): UpperCAmelCase = args UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='''''' ) as csv_file: UpperCAmelCase = csv.DictReader(a__ ) for row in reader: UpperCAmelCase = row['''model'''] self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) ) self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) ) if can_convert_to_int(row['''result'''] ): # value is not None UpperCAmelCase = int(row['''result'''] ) elif can_convert_to_float(row['''result'''] ): # value is not None UpperCAmelCase = float(row['''result'''] ) def __snake_case ( self : Dict ): UpperCAmelCase, UpperCAmelCase = plt.subplots() UpperCAmelCase = '''Time usage''' if self.args.is_time else '''Memory usage''' UpperCAmelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference''' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('''log''' ) ax.set_yscale('''log''' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): UpperCAmelCase = sorted(set(self.result_dict[model_name]['''bsz'''] ) ) UpperCAmelCase = sorted(set(self.result_dict[model_name]['''seq_len'''] ) ) UpperCAmelCase = self.result_dict[model_name]['''result'''] ((UpperCAmelCase), (UpperCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) UpperCAmelCase = ( model_name if self.args.short_model_names is None else 
self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: UpperCAmelCase = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=a__ , ) else: UpperCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((UpperCAmelCase), (UpperCAmelCase)) = ( ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''') ) UpperCAmelCase = np.asarray(a__ , a__ )[: len(a__ )] plt.scatter( a__ , a__ , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" ) plt.plot(a__ , a__ , '''--''' ) title_str += f" {label_model_name} vs." UpperCAmelCase = title_str[:-4] UpperCAmelCase = '''Time in s''' if self.args.is_time else '''Memory in MB''' # plot plt.title(a__ ) plt.xlabel(a__ ) plt.ylabel(a__ ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __snake_case ( ) -> Tuple: """simple docstring""" UpperCAmelCase = HfArgumentParser(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = parser.parse_args_into_dataclasses()[0] UpperCAmelCase = Plot(args=SCREAMING_SNAKE_CASE_ ) plot.plot() if __name__ == "__main__": main()
51
1
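# The axis setup the Plot class above applies before scattering results:
# log-log scales with plain (non-scientific) tick labels via
# ScalarFormatter. The data points here are made up.
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

fig, ax = plt.subplots()
ax.set_xscale("log")
ax.set_yscale("log")
for axis in (ax.xaxis, ax.yaxis):
    axis.set_major_formatter(ScalarFormatter())

ax.plot([8, 64, 512], [12, 90, 700], "--", label="model - bsz: 8")
ax.legend()
fig.savefig("benchmark.png")  # or plt.show(), as in the script above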
'''simple docstring''' import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : str ): UpperCAmelCase = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) UpperCAmelCase = Vector() def __snake_case ( self : Optional[Any] ): UpperCAmelCase = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(a__ ) , '''(0,0,0,0,0,1)''' ) def __snake_case ( self : List[str] ): UpperCAmelCase = Vector([1, 2, 3, 4] ) self.assertEqual(len(a__ ) , 4 ) def __snake_case ( self : int ): UpperCAmelCase = Vector([1, 2] ) UpperCAmelCase = Vector([1, 2, 3, 4, 5] ) UpperCAmelCase = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) UpperCAmelCase = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def __snake_case ( self : Optional[int] ): UpperCAmelCase = Vector([1, 2, 3] ) UpperCAmelCase = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = Vector([1, 2, 3] ) UpperCAmelCase = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def __snake_case ( self : List[Any] ): UpperCAmelCase = Vector([1, 2, 3] ) UpperCAmelCase = Vector([2, -1, 4] ) # for test of dot product UpperCAmelCase = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' ) self.assertEqual((a * b) , 0 ) def __snake_case ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 ) def __snake_case ( self : Union[str, Any] ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' ) def __snake_case ( self : int ): UpperCAmelCase = Vector([1, 2, 3] ) UpperCAmelCase = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , a__ , a__ ) ) , '''(3,4,7)''' ) def __snake_case ( self : List[Any] ): UpperCAmelCase = Vector([1, 0, 0, 0, 0, 0] ) UpperCAmelCase = x.copy() self.assertEqual(str(a__ ) , str(a__ ) ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(a__ ) , '''(0,1,0)''' ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(a__ ) ) def __snake_case ( self : Tuple ): UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) UpperCAmelCase = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(a__ , a__ ) ) def __snake_case ( self : int ): UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) UpperCAmelCase = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(a__ , a__ ) ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def __snake_case ( self : List[Any] ): UpperCAmelCase = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) UpperCAmelCase = Vector([1, 
2, 3] ) self.assertEqual('''(14,32,50)''' , str(a * x ) ) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) ) def __snake_case ( self : Dict ): UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(a__ ) ) def __snake_case ( self : Dict ): UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def __snake_case ( self : Optional[Any] ): UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) ) def __snake_case ( self : Tuple ): UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) ) def __snake_case ( self : str ): self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
51
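# A plain-list sketch of the axpy operation the tests above exercise
# (a * x + y, componentwise); the project's Vector class wraps exactly this
# behaviour, e.g. axpy(2, (1,2,3), (1,0,1)) == (3,4,7) in the test.
def axpy_sketch(a: float, x: list[float], y: list[float]) -> list[float]:
    assert len(x) == len(y), "vectors must have the same dimension"
    return [a * xi + yi for xi, yi in zip(x, y)]


assert axpy_sketch(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7]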
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() a__ : Tuple = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]: """simple docstring""" print('''Loading config file...''' ) def flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]="" , SCREAMING_SNAKE_CASE_ : Dict="." ): UpperCAmelCase = [] for k, v in d.items(): UpperCAmelCase = parent_key + sep + k if parent_key else k if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sep=SCREAMING_SNAKE_CASE_ ).items() ) else: items.append((new_key, v) ) return dict(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = argparse.Namespace() with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as yaml_file: try: UpperCAmelCase = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=yaml.FullLoader ) UpperCAmelCase = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ ) for k, v in flat_cfg.items(): setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) ) ) return config def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: """simple docstring""" UpperCAmelCase = MobileViTVaConfig() UpperCAmelCase = False # dataset if task_name.startswith('''imagenet1k_''' ): UpperCAmelCase = 1_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase = 384 else: UpperCAmelCase = 256 UpperCAmelCase = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): UpperCAmelCase = 21_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase = 384 else: UpperCAmelCase = 256 UpperCAmelCase = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): UpperCAmelCase = 151 UpperCAmelCase = 512 UpperCAmelCase = '''ade20k-id2label.json''' UpperCAmelCase = True elif task_name.startswith('''voc_''' ): UpperCAmelCase = 21 UpperCAmelCase = 512 UpperCAmelCase = '''pascal-voc-id2label.json''' UpperCAmelCase = True # orig_config UpperCAmelCase = load_orig_config_file(SCREAMING_SNAKE_CASE_ ) assert getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , 
'''model.segmentation.deeplabv3.aspp_out_channels''' , 512 ) UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: """simple docstring""" UpperCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = val def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=False ) -> int: """simple docstring""" if base_model: UpperCAmelCase = '''''' else: UpperCAmelCase = '''mobilevitv2.''' UpperCAmelCase = [] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCAmelCase = k[8:] else: UpperCAmelCase = k if ".block." in k: UpperCAmelCase = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: UpperCAmelCase = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: UpperCAmelCase = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: UpperCAmelCase = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." ) for i in [1, 2]: if f"layer_{i}." in k: UpperCAmelCase = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: UpperCAmelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: UpperCAmelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f"layer_{i}.0." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if f"layer_{i}.1.local_rep.0." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if f"layer_{i}.1.local_rep.1." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: UpperCAmelCase = [0, 1] elif i == 4: UpperCAmelCase = [0, 1, 2, 3] elif i == 5: UpperCAmelCase = [0, 1, 2] for j in j_in: if f"layer_{i}.1.global_rep.{j}." in k: UpperCAmelCase = k_new.replace( f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if f"layer_{i}.1.global_rep.{j+1}." in k: UpperCAmelCase = k_new.replace( f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." ) if f"layer_{i}.1.conv_proj." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: UpperCAmelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: UpperCAmelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: UpperCAmelCase = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: UpperCAmelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." 
in k: UpperCAmelCase = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: UpperCAmelCase = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> int: """simple docstring""" UpperCAmelCase = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(SCREAMING_SNAKE_CASE_ ) for k in keys_to_ignore: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __snake_case ( ) -> List[Any]: """simple docstring""" UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple: """simple docstring""" UpperCAmelCase = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load original state_dict UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): UpperCAmelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval() UpperCAmelCase = False else: UpperCAmelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval() UpperCAmelCase = False # remove and rename some keys of load the original model UpperCAmelCase = checkpoint remove_unused_keys(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load modified state_dict model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) # verify classification model if task_name.startswith('''imagenet''' ): UpperCAmelCase = outputs.logits UpperCAmelCase = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCAmelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ) assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='imagenet1k_256', type=str, help=( 'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
' '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n ' ), choices=[ 'imagenet1k_256', 'imagenet1k_384', 'imagenet21k_to_1k_256', 'imagenet21k_to_1k_384', 'ade20k_deeplabv3', 'voc_deeplabv3', ], ) parser.add_argument( '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.') parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) a__ : str = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
51
1
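# The core rename mechanic of the conversion script above: collect
# (pattern, replacement) rules, then pop/reinsert keys on the state dict.
# The keys and rules here are illustrative, mirroring a few of the
# script's patterns.
import re


def rename_state_dict(state_dict: dict, rules: list) -> dict:
    renamed = dict(state_dict)
    for pattern, replacement in rules:
        for key in list(renamed):
            new_key = re.sub(pattern, replacement, key)
            if new_key != key:
                renamed[new_key] = renamed.pop(key)
    return renamed


sd = {"conv_1.block.weight": 1, "layer_3.0.norm.bias": 2}
rules = [(r"\.block\.", "."), (r"\.norm\.", ".normalization."), (r"^conv_1\.", "conv_stem.")]
assert rename_state_dict(sd, rules) == {
    "conv_stem.weight": 1,
    "layer_3.0.normalization.bias": 2,
}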
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
51
1
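# A minimal, hypothetical sketch of the PretrainedConfig pattern the
# configuration class above follows: subclass, forward special-token kwargs to
# super().__init__, and store the remaining arguments as attributes. The class
# and field names below are illustrative stand-ins, not part of the file above.
from transformers import PretrainedConfig


class TinyBertLikeConfig(PretrainedConfig):
    model_type = "tiny-bert-like"

    def __init__(self, vocab_size=1000, hidden_size=64, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size


config = TinyBertLikeConfig(hidden_size=128)
assert config.hidden_size == 128          # overridden value is kept
assert config.model_type == "tiny-bert-like"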
from __future__ import annotations

from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
51
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
51
1
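# An independent sketch of the same parent-pointer BFS idea used in the graph
# file above, with the path rebuilt iteratively instead of recursively:
from collections import deque


def bfs_parents(graph: dict, source: str) -> dict:
    parent = {source: None}
    q = deque([source])
    while q:
        vertex = q.popleft()
        for adjacent in graph[vertex]:
            if adjacent not in parent:
                parent[adjacent] = vertex  # first visit fixes the BFS tree edge
                q.append(adjacent)
    return parent


def shortest_path(parent: dict, target: str) -> str:
    if target not in parent:
        raise ValueError(f"unreachable vertex: {target}")
    hops = []
    while target is not None:  # walk parent pointers back to the source
        hops.append(target)
        target = parent[target]
    return "->".join(reversed(hops))


demo = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
assert shortest_path(bfs_parents(demo, "A"), "D") == "A->B->D"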
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
51
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
51
1
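# The integration tests above pin a slice of model activations against
# reference values with an absolute tolerance. A self-contained sketch of that
# check, where plain tensors stand in for real model output:
import torch

expected = torch.tensor([[-0.0101, 0.1218, -0.0803]])
actual = expected + 5e-4  # pretend this came from model(...)["last_hidden_state"]

assert actual.shape == expected.shape
assert torch.allclose(actual, expected, atol=1e-3)       # within tolerance
assert not torch.allclose(actual, expected, atol=1e-4)   # fails when tightened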
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
51
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
51
1
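# The two __init__ files above gate their import structure on optional
# dependencies. A self-contained sketch of that guard pattern; the exception
# class and availability check below are local stand-ins for the transformers
# utilities of the same name:
import importlib.util


class OptionalDependencyNotAvailable(Exception):
    pass


def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None


_import_structure = {"configuration": ["Config"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: expose only the configuration symbols
else:
    _import_structure["modeling"] = ["Model"]

print(_import_structure)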
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: """simple docstring""" return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :] def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]="attention" ) -> Dict: """simple docstring""" UpperCAmelCase = UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] ) UpperCAmelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] ) UpperCAmelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] ) UpperCAmelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] ) UpperCAmelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> List[str]: """simple docstring""" if split_mlp_wi: UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :] UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :] UpperCAmelCase = (wi_a, wi_a) else: UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :] UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :] return wi, wo def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Any: """simple docstring""" return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i] def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , *, SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[str]: """simple docstring""" UpperCAmelCase = traverse_util.flatten_dict(variables['''target'''] ) UpperCAmelCase = {'''/'''.join(SCREAMING_SNAKE_CASE_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCAmelCase = '''encoder/encoder/mlp/wi_0/kernel''' in old print('''Split MLP:''' , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = collections.OrderedDict() # Shared embeddings. UpperCAmelCase = old['''token_embedder/embedding'''] # Encoder. for i in range(SCREAMING_SNAKE_CASE_ ): # Block i, layer 0 (Self Attention). UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''pre_attention_layer_norm''' ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''attention''' ) UpperCAmelCase = layer_norm UpperCAmelCase = k.T UpperCAmelCase = o.T UpperCAmelCase = q.T UpperCAmelCase = v.T # Block i, layer 1 (MLP). 
UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''pre_mlp_layer_norm''' ) UpperCAmelCase, UpperCAmelCase = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = layer_norm if split_mlp_wi: UpperCAmelCase = wi[0].T UpperCAmelCase = wi[1].T else: UpperCAmelCase = wi.T UpperCAmelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' ).T UpperCAmelCase = old['''encoder/encoder_norm/scale'''] if not scalable_attention: UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , 0 , '''encoder''' ).T UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , 0 , '''decoder''' ).T if not is_encoder_only: # Decoder. for i in range(SCREAMING_SNAKE_CASE_ ): # Block i, layer 0 (Self Attention). UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_self_attention_layer_norm''' ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''self_attention''' ) UpperCAmelCase = layer_norm UpperCAmelCase = k.T UpperCAmelCase = o.T UpperCAmelCase = q.T UpperCAmelCase = v.T # Block i, layer 1 (Cross Attention). UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_cross_attention_layer_norm''' ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''encoder_decoder_attention''' ) UpperCAmelCase = layer_norm UpperCAmelCase = k.T UpperCAmelCase = o.T UpperCAmelCase = q.T UpperCAmelCase = v.T # Block i, layer 2 (MLP). UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_mlp_layer_norm''' ) UpperCAmelCase, UpperCAmelCase = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = layer_norm if split_mlp_wi: UpperCAmelCase = wi[0].T UpperCAmelCase = wi[1].T else: UpperCAmelCase = wi.T UpperCAmelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' ).T UpperCAmelCase = old['''decoder/decoder_norm/scale'''] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCAmelCase = old['''decoder/logits_dense/kernel'''].T return new def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : bool ) -> Any: """simple docstring""" UpperCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCAmelCase = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCAmelCase = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print('''Using shared word embeddings as lm_head.''' ) UpperCAmelCase = state_dict['''shared.weight'''] return state_dict def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: """simple docstring""" UpperCAmelCase = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = convert_tax_to_pytorch( SCREAMING_SNAKE_CASE_ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE_ , scalable_attention=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = make_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Dict: """simple docstring""" UpperCAmelCase = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(f"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCAmelCase = UMTaEncoderModel(SCREAMING_SNAKE_CASE_ ) else: UpperCAmelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Verify that we can load the checkpoint. model.from_pretrained(SCREAMING_SNAKE_CASE_ ) print('''Done''' ) if __name__ == "__main__": a__ : List[str] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action='store_true', help='Whether the model uses scaled attention (umt5 model)', default=False, ) a__ : List[str] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
51
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
51
1
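# Worked example for the digit-sum-of-factorial snippet above: 10! = 3628800,
# and 3+6+2+8+8+0+0 = 27. For n = 100 this reproduces the well-known Project
# Euler 20 answer, 648.
from math import factorial


def digit_sum_of_factorial(n: int) -> int:
    return sum(int(d) for d in str(factorial(n)))


assert digit_sum_of_factorial(10) == 27
assert digit_sum_of_factorial(100) == 648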
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
51
'''simple docstring''' import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =StableUnCLIPPipeline _lowerCamelCase =TEXT_TO_IMAGE_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _lowerCamelCase =False def __snake_case ( self : str ): UpperCAmelCase = 32 UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ ) UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , ) torch.manual_seed(0 ) UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , ) torch.manual_seed(0 ) UpperCAmelCase = AutoencoderKL() UpperCAmelCase = { # prior components '''prior_tokenizer''': prior_tokenizer, 
'''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ): if str(a__ ).startswith('''mps''' ): UpperCAmelCase = torch.manual_seed(a__ ) else: UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ ) UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __snake_case ( self : List[Any] ): UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=a__ ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Optional[int] ): UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCAmelCase = pipe('''anime turle''' , generator=a__ , output_type='''np''' ) UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(a__ , a__ ) def __snake_case ( self : str ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) UpperCAmelCase = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
51
1
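# The pipeline test above builds its inputs from a seeded torch.Generator so
# runs are reproducible. A minimal sketch of that pattern:
import torch


def make_latents(seed: int) -> torch.Tensor:
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(1, 4, 8, 8, generator=generator)


assert torch.equal(make_latents(0), make_latents(0))      # same seed, same tensor
assert not torch.equal(make_latents(0), make_latents(1))  # different seed differs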
'''simple docstring''' import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str , a__ : Optional[int] , a__ : List[Any]=13 , a__ : Optional[int]=7 , a__ : str=6 , a__ : Union[str, Any]=17 , a__ : List[Any]=23 , a__ : Tuple=11 , a__ : Optional[int]=True , ): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = act_dim UpperCAmelCase = state_dim UpperCAmelCase = hidden_size UpperCAmelCase = max_length UpperCAmelCase = is_training def __snake_case ( self : Optional[Any] ): UpperCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 ) UpperCAmelCase = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __snake_case ( self : List[Any] ): return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __snake_case ( self : Dict , a__ : Union[str, Any] , a__ : Dict , a__ : Dict , a__ : Dict , a__ : List[Any] , a__ : List[str] , a__ : Union[str, Any] , ): UpperCAmelCase = DecisionTransformerModel(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model(a__ , a__ , a__ , a__ , a__ , a__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ) = config_and_inputs UpperCAmelCase = { '''states''': states, '''actions''': actions, '''rewards''': rewards, '''returns_to_go''': returns_to_go, '''timesteps''': timesteps, '''attention_mask''': attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =(DecisionTransformerModel,) if is_torch_available() else () _lowerCamelCase =() _lowerCamelCase ={"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # 
Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids _lowerCamelCase =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = DecisionTransformerModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=a__ , hidden_size=37 ) def __snake_case ( self : str ): self.config_tester.run_common_tests() def __snake_case ( self : str ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) @slow def __snake_case ( self : List[str] ): for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = DecisionTransformerModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def __snake_case ( self : List[str] ): UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(a__ ) UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase = [*signature.parameters.keys()] UpperCAmelCase = [ '''states''', '''actions''', '''rewards''', '''returns_to_go''', '''timesteps''', '''attention_mask''', ] self.assertListEqual(arg_names[: len(a__ )] , a__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __snake_case ( self : Optional[Any] ): UpperCAmelCase = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase = 10 # defined by the RL environment, may be normalized UpperCAmelCase = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' ) UpperCAmelCase = model.to(a__ ) UpperCAmelCase = model.config torch.manual_seed(0 ) UpperCAmelCase = torch.randn(1 , 1 , config.state_dim ).to(device=a__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase = torch.tensor( [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=a__ ) UpperCAmelCase = torch.tensor(a__ , device=a__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase = state UpperCAmelCase = torch.zeros(1 , 0 , config.act_dim , device=a__ , dtype=torch.floataa ) UpperCAmelCase = torch.zeros(1 , 0 , device=a__ , dtype=torch.floataa ) UpperCAmelCase = torch.tensor(0 , device=a__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(a__ ): UpperCAmelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=a__ )] , dim=1 ) UpperCAmelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=a__ )] , dim=1 ) UpperCAmelCase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = model( states=a__ , actions=a__ , rewards=a__ , returns_to_go=a__ , timesteps=a__ , attention_mask=a__ , return_dict=a__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=a__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase = action_pred[0, -1] UpperCAmelCase = 
torch.cat([states, state] , dim=1 ) UpperCAmelCase = returns_to_go[0, -1] - reward UpperCAmelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase = torch.cat( [timesteps, torch.ones((1, 1) , device=a__ , dtype=torch.long ) * (step + 1)] , dim=1 )
51
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
51
1
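# A compact, self-contained variant of the midpoint-and-stack palindrome check
# above, with a minimal ListNode class added so it runs standalone:
class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


def is_palindrome_sketch(head) -> bool:
    slow = fast = cur = head
    while fast and fast.next:  # slow lands on the middle node
        fast, slow = fast.next.next, slow.next
    stack = []
    while slow:  # push the second half (middle included)
        stack.append(slow.val)
        slow = slow.next
    while stack:  # popping reverses it for comparison with the first half
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
assert is_palindrome_sketch(head)
assert not is_palindrome_sketch(ListNode(1, ListNode(2)))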
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
51
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
51
1
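# Sketch of the merge step in the processor's __call__ above: text features and
# image features are combined into one batch dict. The plain dicts below are
# stand-ins for real tokenizer / image-processor outputs.
def combine_features(text_encoding=None, image_features=None):
    if text_encoding is None and image_features is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    if text_encoding is not None and image_features is not None:
        text_encoding["pixel_values"] = image_features["pixel_values"]
        return text_encoding
    return text_encoding if text_encoding is not None else image_features


batch = combine_features({"input_ids": [[0, 5, 2]]}, {"pixel_values": [[0.0]]})
assert set(batch) == {"input_ids", "pixel_values"}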
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =StableDiffusionLatentUpscalePipeline _lowerCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } _lowerCamelCase =PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} _lowerCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _lowerCamelCase =frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _lowerCamelCase =frozenset([] ) _lowerCamelCase =True @property def __snake_case ( self : Optional[Any] ): UpperCAmelCase = 1 UpperCAmelCase = 4 UpperCAmelCase = (16, 16) UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ ) return image def __snake_case ( self : Tuple ): torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=a__ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=a__ , only_cross_attention=a__ , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) UpperCAmelCase = EulerDiscreteScheduler(prediction_type='''sample''' ) UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , ) UpperCAmelCase = CLIPTextModel(a__ ) UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase = { '''unet''': model.eval(), '''vae''': vae.eval(), '''scheduler''': 
scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def __snake_case ( self : Any , a__ : Optional[int] , a__ : List[Any]=0 ): if str(a__ ).startswith('''mps''' ): UpperCAmelCase = torch.manual_seed(a__ ) else: UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ ) UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': self.dummy_image.cpu(), '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __snake_case ( self : Optional[int] ): UpperCAmelCase = '''cpu''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**a__ ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_dummy_inputs(a__ ) UpperCAmelCase = pipe(**a__ ).images UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 256, 256, 3) ) UpperCAmelCase = np.array( [0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] ) UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a__ , 1e-3 ) def __snake_case ( self : Dict ): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 ) def __snake_case ( self : Optional[Any] ): super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 ) def __snake_case ( self : str ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def __snake_case ( self : Optional[int] ): super().test_inference_batch_single_identical(expected_max_diff=7e-3 ) def __snake_case ( self : List[Any] ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 ) def __snake_case ( self : Optional[int] ): super().test_save_load_local(expected_max_difference=3e-3 ) def __snake_case ( self : List[str] ): super().test_save_load_optional_components(expected_max_difference=3e-3 ) def __snake_case ( self : Optional[Any] ): UpperCAmelCase = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**a__ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=a__ ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_dummy_inputs(a__ ) UpperCAmelCase = 2 UpperCAmelCase = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue UpperCAmelCase = getattr(a__ , scheduler_enum.name ) UpperCAmelCase = scheduler_cls.from_config(pipe.scheduler.config ) UpperCAmelCase = pipe(**a__ )[0] outputs.append(a__ ) assert check_same_shape(a__ ) @require_torch_gpu @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : Union[str, Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[Any] ): UpperCAmelCase = torch.manual_seed(33 ) UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) UpperCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) UpperCAmelCase = '''a photo of an astronaut 
high resolution, unreal engine, ultra realistic''' UpperCAmelCase = pipe(a__ , generator=a__ , output_type='''latent''' ).images UpperCAmelCase = upscaler( prompt=a__ , image=a__ , num_inference_steps=20 , guidance_scale=0 , generator=a__ , output_type='''np''' , ).images[0] UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5e-2 def __snake_case ( self : Optional[int] ): UpperCAmelCase = torch.manual_seed(33 ) UpperCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) UpperCAmelCase = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas''' UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) UpperCAmelCase = upscaler( prompt=a__ , image=a__ , num_inference_steps=20 , guidance_scale=0 , generator=a__ , output_type='''np''' , ).images[0] UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5e-2
51
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ : str = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =XGLMTokenizer _lowerCamelCase =XGLMTokenizerFast _lowerCamelCase =True _lowerCamelCase =True def __snake_case ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self : List[Any] ): UpperCAmelCase = '''<pad>''' UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(len(a__ ) , 1008 ) def __snake_case ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def __snake_case ( self : Optional[Any] ): UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ ) UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual( a__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def __snake_case ( self : Optional[Any] ): return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) def __snake_case ( self : Optional[int] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(a__ , f.name ) UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ ) UpperCAmelCase = pickle.dumps(a__ ) pickle.loads(a__ ) def __snake_case ( self : Tuple ): if not self.test_rust_tokenizer: return UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = '''I was born in 92000, and this is 
falsé.''' UpperCAmelCase = tokenizer.tokenize(a__ ) UpperCAmelCase = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ ) UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = tokenizer.encode(a__ ) UpperCAmelCase = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) @slow def __snake_case ( self : int ): UpperCAmelCase = '''Hello World!''' UpperCAmelCase = [2, 31227, 4447, 35] self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @slow def __snake_case ( self : List[str] ): UpperCAmelCase = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth''' ) # fmt: off UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: on self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @slow def __snake_case ( self : Any ): # fmt: off UpperCAmelCase = { '''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
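# --- illustrative sketch (not part of the test file above) ---
# The tests above rely on `tokenizer.fairseq_offset`: raw SentencePiece ids are
# shifted by a constant so that the lowest ids stay reserved for control tokens.
# A minimal, self-contained sketch of that remapping; the offset value and the
# special-token table below are assumptions for illustration, not the real
# XGLM vocabulary.
FAIRSEQ_OFFSET = 1  # assumed value for this sketch
SPECIAL_TOKENS = {0: "<s>", 1: "<pad>"}


def sp_id_to_model_id(sp_id: int) -> int:
    """Shift a raw SentencePiece id into the model vocabulary."""
    return sp_id + FAIRSEQ_OFFSET


def model_id_to_sp_id(model_id: int) -> int:
    """Inverse mapping, valid only for non-special ids."""
    assert model_id not in SPECIAL_TOKENS
    return model_id - FAIRSEQ_OFFSET


if __name__ == "__main__":
    raw_ids = [285, 46, 10, 170, 382]  # fixture ids for "This is a test" above
    shifted = [sp_id_to_model_id(i) for i in raw_ids]
    assert [model_id_to_sp_id(i) for i in shifted] == raw_ids
    print(shifted)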
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : Dict = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[Any] = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys a__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
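# --- illustrative sketch (not part of the module above) ---
# The `_import_structure` / `_LazyModule` pattern above defers heavy imports
# (torch, tensorflow, flax) until an attribute is first accessed. A minimal
# stand-alone sketch of the same idea using module-level __getattr__ (PEP 562);
# the module and attribute names below are placeholders, not the real
# blenderbot layout.
import importlib

_IMPORT_STRUCTURE = {
    "json": ["dumps", "loads"],  # stands in for e.g. "modeling_blenderbot"
}
_ATTR_TO_MODULE = {attr: mod for mod, attrs in _IMPORT_STRUCTURE.items() for attr in attrs}


def __getattr__(name):
    # Import only the submodule that actually provides the requested name.
    module_name = _ATTR_TO_MODULE.get(name)
    if module_name is None:
        raise AttributeError(f"module has no attribute {name!r}")
    module = importlib.import_module(module_name)
    return getattr(module, name)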
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a__ : str = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> YolosConfig: """simple docstring""" UpperCAmelCase = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase = 192 UpperCAmelCase = 768 UpperCAmelCase = 12 UpperCAmelCase = 3 UpperCAmelCase = [800, 1_333] UpperCAmelCase = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase = 330 UpperCAmelCase = 14 UpperCAmelCase = 6 UpperCAmelCase = 1_320 elif "yolos_s" in yolos_name: UpperCAmelCase = 384 UpperCAmelCase = 1_536 UpperCAmelCase = 12 UpperCAmelCase = 6 elif "yolos_b" in yolos_name: UpperCAmelCase = [800, 1_344] UpperCAmelCase = 91 UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''coco-detection-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosConfig , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase = in_proj_weight[: config.hidden_size, :] UpperCAmelCase = in_proj_bias[: config.hidden_size] UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" if "backbone" in name: UpperCAmelCase = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" 
in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if "vit.norm" in name: UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosForObjectDetection ) -> dict: """simple docstring""" for key in orig_state_dict.copy().keys(): UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_split[2] ) UpperCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[ dim : dim * 2, : ] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = val return orig_state_dict def __snake_case ( ) -> torch.Tensor: """simple docstring""" UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" UpperCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE_ ) # load original state_dict UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model'''] # load 🤗 model UpperCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512 UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase, UpperCAmelCase = outputs.logits, outputs.pred_boxes UpperCAmelCase, UpperCAmelCase = None, None if yolos_name == "yolos_ti": UpperCAmelCase = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) UpperCAmelCase = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) UpperCAmelCase = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) UpperCAmelCase = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) UpperCAmelCase = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": UpperCAmelCase = torch.tensor( [[-40.6064, 
-24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) UpperCAmelCase = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: UpperCAmelCase = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) UpperCAmelCase = model_mapping[yolos_name] image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) a__ : Optional[Any] = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
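# --- illustrative sketch (not part of the conversion script above) ---
# `read_in_q_k_v` above slices a fused qkv projection (timm layout) into the
# separate query/key/value weights the HF checkpoint expects. The same slicing
# on a toy tensor, assuming the rows are stacked as [q; k; v]:
import torch

hidden_size = 4
fused_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
fused_weight = fused_weight.reshape(3 * hidden_size, hidden_size)

q = fused_weight[:hidden_size, :]
k = fused_weight[hidden_size : 2 * hidden_size, :]
v = fused_weight[-hidden_size:, :]

# Re-stacking must reproduce the fused matrix exactly.
assert torch.equal(torch.cat([q, k, v], dim=0), fused_weight)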
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType a__ : List[Any] = logging.get_logger(__name__) a__ : int = { 'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json', } # fmt: off a__ : Any = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377, 1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211, 4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786, 11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791, 17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409, 34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361 ] a__ : Tuple = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627, 3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647, 7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793, 14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675, 22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865, 42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362 ] class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase ="whisper" _lowerCamelCase =["past_key_values"] _lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : List[Any] , a__ : Any=51865 , a__ : Any=80 , a__ : Dict=6 , a__ : int=4 , a__ : int=6 , a__ : str=4 , a__ : int=1536 , a__ : Optional[Any]=1536 , a__ : str=0.0 , a__ : Optional[int]=0.0 , a__ : Optional[int]=50257 , a__ : int=True , a__ : Optional[int]=True , a__ : str="gelu" , a__ : List[str]=256 , a__ : Union[str, Any]=0.0 , a__ : List[str]=0.0 , a__ : Any=0.0 , a__ : str=0.02 , a__ : str=False , a__ : int=1500 , a__ : Tuple=448 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Tuple=None , a__ : List[Any]=[220, 50256] , a__ : Optional[int]=False , a__ : Optional[Any]=256 , a__ : Any=False , a__ : int=0.05 , a__ : Optional[Any]=10 , a__ : Dict=2 , a__ : Optional[Any]=0.0 , a__ : Tuple=10 , a__ : Any=0 , a__ : str=7 , **a__ : Any , ): UpperCAmelCase = vocab_size UpperCAmelCase = num_mel_bins UpperCAmelCase = d_model UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = max_source_positions UpperCAmelCase = max_target_positions # Audio Classification-specific parameters. 
Feel free to ignore for other classes. UpperCAmelCase = classifier_proj_size UpperCAmelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase = apply_spec_augment UpperCAmelCase = mask_time_prob UpperCAmelCase = mask_time_length UpperCAmelCase = mask_time_min_masks UpperCAmelCase = mask_feature_prob UpperCAmelCase = mask_feature_length UpperCAmelCase = mask_feature_min_masks UpperCAmelCase = median_filter_width super().__init__( pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , suppress_tokens=a__ , begin_suppress_tokens=a__ , **a__ , ) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' @property def __snake_case ( self : List[str] ): UpperCAmelCase = OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase = {0: '''batch'''} else: UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(a__ , direction='''inputs''' ) return common_inputs def __snake_case ( self : str , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 22050 , a__ : float = 5.0 , a__ : int = 220 , ): UpperCAmelCase = OrderedDict() UpperCAmelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=a__ , framework=a__ , sampling_rate=a__ , time_duration=a__ , frequency=a__ , ) UpperCAmelCase = encoder_inputs['''input_features'''].shape[2] UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length UpperCAmelCase = super().generate_dummy_inputs( preprocessor.tokenizer , a__ , a__ , a__ , a__ ) UpperCAmelCase = encoder_inputs.pop('''input_features''' ) UpperCAmelCase = decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: UpperCAmelCase = decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def __snake_case ( self : Dict ): return 1e-3
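# --- illustrative sketch (not part of the config above) ---
# The SpecAugment fields above (mask_time_prob, mask_time_length, ...) control
# how many time steps get randomly masked during fine-tuning. A deliberately
# simplified sketch of drawing such a mask with numpy; the real modeling code
# additionally handles span overlap, mask_time_min_masks, and the attention mask.
import numpy as np


def sample_time_mask(seq_len: int, mask_prob: float = 0.05, mask_length: int = 10) -> np.ndarray:
    """Return a boolean mask covering roughly `mask_prob` of the time steps."""
    num_spans = max(int(mask_prob * seq_len / mask_length), 1)
    mask = np.zeros(seq_len, dtype=bool)
    starts = np.random.randint(0, max(seq_len - mask_length, 1), size=num_spans)
    for start in starts:
        mask[start : start + mask_length] = True
    return mask


mask = sample_time_mask(seq_len=1500)  # 1500 matches max_source_positions above
print(mask.mean())  # fraction of masked frames, approximately mask_prob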
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : Dict = logging.get_logger(__name__) a__ : Tuple = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase ="yolos" def __init__( self : Any , a__ : List[Any]=768 , a__ : str=12 , a__ : Tuple=12 , a__ : Union[str, Any]=3072 , a__ : int="gelu" , a__ : Tuple=0.0 , a__ : Dict=0.0 , a__ : Dict=0.02 , a__ : Tuple=1e-1_2 , a__ : str=[512, 864] , a__ : List[Any]=16 , a__ : Dict=3 , a__ : str=True , a__ : Union[str, Any]=100 , a__ : Tuple=True , a__ : List[str]=False , a__ : Optional[Any]=1 , a__ : Dict=5 , a__ : int=2 , a__ : str=5 , a__ : Tuple=2 , a__ : Tuple=0.1 , **a__ : Optional[int] , ): super().__init__(**a__ ) UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = qkv_bias UpperCAmelCase = num_detection_tokens UpperCAmelCase = use_mid_position_embeddings UpperCAmelCase = auxiliary_loss # Hungarian matcher UpperCAmelCase = class_cost UpperCAmelCase = bbox_cost UpperCAmelCase = giou_cost # Loss coefficients UpperCAmelCase = bbox_loss_coefficient UpperCAmelCase = giou_loss_coefficient UpperCAmelCase = eos_coefficient class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =version.parse("1.11" ) @property def __snake_case ( self : str ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __snake_case ( self : List[str] ): return 1e-4 @property def __snake_case ( self : Union[str, Any] ): return 12
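# --- illustrative sketch (not part of the config above) ---
# The OnnxConfig subclass above declares which axes of `pixel_values` are
# dynamic for ONNX export. A minimal sketch of how such an axes mapping is
# consumed by torch.onnx.export; the tiny Conv2d below is a placeholder
# "patch embedding", not YOLOS itself.
import torch

model = torch.nn.Conv2d(3, 8, kernel_size=16, stride=16)  # stand-in module
dummy = torch.randn(1, 3, 512, 864)  # matches image_size=[512, 864] above

torch.onnx.export(
    model,
    dummy,
    "toy.onnx",
    input_names=["pixel_values"],
    # Same axis names as the OnnxConfig property above: batch/height/width vary.
    dynamic_axes={"pixel_values": {0: "batch", 2: "height", 3: "width"}},
    opset_version=11,
)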
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =LEDConfig _lowerCamelCase ={} _lowerCamelCase ="gelu" def __init__( self : Tuple , a__ : Any , a__ : int=13 , a__ : List[Any]=7 , a__ : int=True , a__ : Union[str, Any]=False , a__ : Tuple=99 , a__ : Any=32 , a__ : List[Any]=2 , a__ : Any=4 , a__ : List[Any]=37 , a__ : List[Any]=0.1 , a__ : Any=0.1 , a__ : Optional[int]=20 , a__ : List[Any]=2 , a__ : Union[str, Any]=1 , a__ : List[Any]=0 , a__ : Union[str, Any]=4 , ): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = eos_token_id UpperCAmelCase = pad_token_id UpperCAmelCase = bos_token_id UpperCAmelCase = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after UpperCAmelCase = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests UpperCAmelCase = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __snake_case ( self : Optional[int] ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ ) UpperCAmelCase = tf.concat( [tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , ) UpperCAmelCase = global_attention_mask return config, inputs_dict def __snake_case ( self : Optional[int] , a__ : List[str] , a__ : int ): UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder() UpperCAmelCase = inputs_dict['''input_ids'''] UpperCAmelCase = input_ids[:1, :] UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase = 1 # first forward pass UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ ) UpperCAmelCase, UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase = model(a__ , attention_mask=a__ )[0] UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(a__ , a__ , rtol=1e-3 ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=None , ) -> Dict: """simple docstring""" if attention_mask is None: UpperCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase = tf.ones((config.encoder_layers, 
config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowerCamelCase =(TFLEDForConditionalGeneration,) if is_tf_available() else () _lowerCamelCase =( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowerCamelCase =True _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False def __snake_case ( self : Optional[Any] ): UpperCAmelCase = TFLEDModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=a__ ) def __snake_case ( self : int ): self.config_tester.run_common_tests() def __snake_case ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*a__ ) def __snake_case ( self : Optional[int] ): UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] ) UpperCAmelCase = 2 UpperCAmelCase = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) UpperCAmelCase = True UpperCAmelCase = self.model_tester.seq_length UpperCAmelCase = self.model_tester.encoder_seq_length def check_decoder_attentions_output(a__ : Tuple ): UpperCAmelCase = outputs.decoder_attentions self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(a__ : int ): UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions] UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) UpperCAmelCase = len(a__ ) self.assertEqual(config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) if self.is_encoder_decoder: UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) self.assertEqual(config.output_hidden_states , a__ ) check_decoder_attentions_output(a__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase = True UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , 
a__ ) ) self.assertEqual(config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) # Check attention is always last and order is fine UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) ) self.assertEqual(model.config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def __snake_case ( self : Any ): pass def __snake_case ( self : Union[str, Any] ): # TODO: Head-masking not yet implement pass def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple: """simple docstring""" return tf.constant(SCREAMING_SNAKE_CASE_ , dtype=tf.intaa ) a__ : int = 1e-4 @slow @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : str ): UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ ) UpperCAmelCase = model(**a__ )[0] UpperCAmelCase = (1, 1024, 768) self.assertEqual(output.shape , a__ ) # change to expected output here UpperCAmelCase = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 ) def __snake_case ( self : str ): UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ ) UpperCAmelCase = model(**a__ )[0] UpperCAmelCase = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , a__ ) # change to expected output here UpperCAmelCase = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
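# --- illustrative sketch (not part of the test file above) ---
# The tests above build `global_attention_mask` by marking the first
# `num_global_attn_indices` tokens of each sequence as globally attending (1)
# and the rest as local (0). The same construction on a toy batch with plain
# TensorFlow ops:
import tensorflow as tf

batch_size, seq_length, num_global = 2, 8, 2
positions = tf.range(seq_length)[None, :]  # shape (1, seq_length)
global_attention_mask = tf.cast(positions < num_global, tf.int32)
global_attention_mask = tf.tile(global_attention_mask, [batch_size, 1])
print(global_attention_mask.numpy())
# [[1 1 0 0 0 0 0 0]
#  [1 1 0 0 0 0 0 0]]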
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : Optional[int] ): UpperCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase = dict(zip(a__ , range(len(a__ ) ) ) ) UpperCAmelCase = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16000, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase = os.path.join(self.tmpdirname , a__ ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a__ ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a__ ) + '''\n''' ) # load decoder from hub UpperCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder''' def __snake_case ( self : int , **a__ : Optional[int] ): UpperCAmelCase = self.add_kwargs_tokens_map.copy() kwargs.update(a__ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **a__ ) def __snake_case ( self : int , **a__ : str ): return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **a__ ) def __snake_case ( self : List[str] , **a__ : Optional[Any] ): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **a__ ) def __snake_case ( self : int ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self : Optional[int] ): UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , a__ ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , a__ ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , 
decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , a__ ) def __snake_case ( self : Any ): UpperCAmelCase = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def __snake_case ( self : Optional[int] ): UpperCAmelCase = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(a__ , '''include''' ): WavaVecaProcessorWithLM( tokenizer=a__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def __snake_case ( self : Optional[int] ): UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = floats_list((3, 1000) ) UpperCAmelCase = feature_extractor(a__ , return_tensors='''np''' ) UpperCAmelCase = processor(a__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __snake_case ( self : Any ): UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = '''This is a test string''' UpperCAmelCase = processor(text=a__ ) UpperCAmelCase = tokenizer(a__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __snake_case ( self : int , a__ : int=(2, 10, 16) , a__ : List[Any]=77 ): np.random.seed(a__ ) return np.random.rand(*a__ ) def __snake_case ( self : Optional[Any] ): UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 ) UpperCAmelCase = processor.decode(a__ ) UpperCAmelCase = decoder.decode_beams(a__ )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def __snake_case ( self : Any , a__ : str ): UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: UpperCAmelCase = processor.batch_decode(a__ ) else: with get_context(a__ ).Pool() as pool: UpperCAmelCase = processor.batch_decode(a__ , a__ ) UpperCAmelCase = list(a__ ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase = decoder.decode_beams_batch(a__ , a__ ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(a__ , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(a__ , decoded_processor.logit_score ) self.assertListEqual(a__ , decoded_processor.lm_score ) def __snake_case ( self : Dict ): UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = self._get_dummy_logits() UpperCAmelCase = 15 UpperCAmelCase = -20.0 UpperCAmelCase = -4.0 UpperCAmelCase = processor.batch_decode( a__ , beam_width=a__ , beam_prune_logp=a__ , token_min_logp=a__ , ) UpperCAmelCase = decoded_processor_out.text UpperCAmelCase = list(a__ ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase = decoder.decode_beams_batch( a__ , a__ , beam_width=a__ , beam_prune_logp=a__ , token_min_logp=a__ , ) UpperCAmelCase = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(a__ , a__ ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , a__ ) self.assertTrue(np.array_equal(a__ , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , a__ , atol=1e-3 ) ) self.assertTrue(np.array_equal(a__ , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , a__ , atol=1e-3 ) ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = self._get_dummy_logits() UpperCAmelCase = 2.0 UpperCAmelCase = 5.0 UpperCAmelCase = -20.0 UpperCAmelCase = True UpperCAmelCase = processor.batch_decode( a__ , alpha=a__ , beta=a__ , unk_score_offset=a__ , lm_score_boundary=a__ , ) UpperCAmelCase = decoded_processor_out.text UpperCAmelCase = list(a__ ) decoder.reset_params( alpha=a__ , beta=a__ , unk_score_offset=a__ , lm_score_boundary=a__ , ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase = decoder.decode_beams_batch( a__ , a__ , ) UpperCAmelCase = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(a__ , a__ ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , a__ ) UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , a__ ) def __snake_case ( self : List[Any] ): UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) 
).parent.parent.absolute() UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(a__ , a__ ) def __snake_case ( self : str ): UpperCAmelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(a__ ) UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = os.listdir(a__ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(a__ , a__ ) def __snake_case ( self : List[Any] ): UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = floats_list((3, 1000) ) UpperCAmelCase = processor_wavaveca(a__ , return_tensors='''np''' ) UpperCAmelCase = processor_auto(a__ , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase = self._get_dummy_logits() UpperCAmelCase = processor_wavaveca.batch_decode(a__ ) UpperCAmelCase = processor_auto.batch_decode(a__ ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def __snake_case ( a__ : Tuple , a__ : int ): UpperCAmelCase = [d[key] for d in offsets] return retrieved_list def __snake_case ( self : Dict ): UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = self._get_dummy_logits()[0] UpperCAmelCase = processor.decode(a__ , output_word_offsets=a__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(a__ , a__ ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def __snake_case ( self : Any ): UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = self._get_dummy_logits() UpperCAmelCase = processor.batch_decode(a__ , output_word_offsets=a__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) 
self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(a__ , a__ ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def __snake_case ( self : Dict ): import torch UpperCAmelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=a__ ) UpperCAmelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) ) UpperCAmelCase = iter(a__ ) UpperCAmelCase = next(a__ ) UpperCAmelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase = model(a__ ).logits.cpu().numpy() UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=a__ ) UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) , a__ ) self.assertEqual(''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) , output.text ) # output times UpperCAmelCase = torch.tensor(self.get_from_offsets(a__ , '''start_time''' ) ) UpperCAmelCase = torch.tensor(self.get_from_offsets(a__ , '''end_time''' ) ) # fmt: off UpperCAmelCase = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) UpperCAmelCase = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(a__ , a__ , atol=0.01 ) ) self.assertTrue(torch.allclose(a__ , a__ , atol=0.01 ) )
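# --- illustrative sketch (not part of the test file above) ---
# The slow test above converts CTC frame offsets to seconds via
# time_offset = inputs_to_logits_ratio / sampling_rate. A self-contained
# version of that arithmetic; the ratio of 320 (the product of the wav2vec2
# conv strides) is an assumed value for illustration.
INPUTS_TO_LOGITS_RATIO = 320  # input samples consumed per logit frame (assumed)
SAMPLING_RATE = 16_000        # Hz

time_offset = INPUTS_TO_LOGITS_RATIO / SAMPLING_RATE  # seconds per logit frame

word_offsets = [{"word": "HELLO", "start_offset": 71, "end_offset": 83}]
timed = [
    {
        "word": d["word"],
        "start_time": d["start_offset"] * time_offset,
        "end_time": d["end_offset"] * time_offset,
    }
    for d in word_offsets
]
print(timed)  # [{'word': 'HELLO', 'start_time': 1.42, 'end_time': 1.66}]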
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = ['XLNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[Any] = ['XLNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = [ 'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLNetForMultipleChoice', 'XLNetForQuestionAnswering', 'XLNetForQuestionAnsweringSimple', 'XLNetForSequenceClassification', 'XLNetForTokenClassification', 'XLNetLMHeadModel', 'XLNetModel', 'XLNetPreTrainedModel', 'load_tf_weights_in_xlnet', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ 'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLNetForMultipleChoice', 'TFXLNetForQuestionAnsweringSimple', 'TFXLNetForSequenceClassification', 'TFXLNetForTokenClassification', 'TFXLNetLMHeadModel', 'TFXLNetMainLayer', 'TFXLNetModel', 'TFXLNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> int: """simple docstring""" UpperCAmelCase = args.pruning_method UpperCAmelCase = args.threshold UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) UpperCAmelCase = args.target_model_path print(f"Load fine-pruned model from {model_name_or_path}" ) UpperCAmelCase = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''pytorch_model.bin''' ) ) UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: UpperCAmelCase = tensor print(f"Copied layer {name}" ) elif "classifier" in name or "qa_output" in name: UpperCAmelCase = tensor print(f"Copied layer {name}" ) elif "bias" in name: UpperCAmelCase = tensor print(f"Copied layer {name}" ) else: if pruning_method == "magnitude": UpperCAmelCase = MagnitudeBinarizer.apply(inputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = tensor * mask print(f"Pruned layer {name}" ) elif pruning_method == "topK": if "mask_scores" in name: continue UpperCAmelCase = name[:-6] UpperCAmelCase = model[f"{prefix_}mask_scores"] UpperCAmelCase = TopKBinarizer.apply(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = tensor * mask print(f"Pruned layer {name}" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue UpperCAmelCase = name[:-6] UpperCAmelCase = model[f"{prefix_}mask_scores"] UpperCAmelCase = ThresholdBinarizer.apply(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = tensor * mask print(f"Pruned layer {name}" ) elif pruning_method == "l0": if "mask_scores" in name: continue UpperCAmelCase = name[:-6] UpperCAmelCase = model[f"{prefix_}mask_scores"] UpperCAmelCase, UpperCAmelCase = -0.1, 1.1 UpperCAmelCase = torch.sigmoid(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = s * (r - l) + l UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) UpperCAmelCase = tensor * mask print(f"Pruned layer {name}" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: UpperCAmelCase = os.path.join( os.path.dirname(SCREAMING_SNAKE_CASE_ ) , f"bertarized_{os.path.basename(SCREAMING_SNAKE_CASE_ )}" ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): shutil.copytree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print(f"\nCreated folder {target_model_path}" ) torch.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": a__ : List[str] = argparse.ArgumentParser() parser.add_argument( '--pruning_method', choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'], type=str, required=True, help=( 'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,' ' sigmoied_threshold = Soft movement pruning)' ), ) parser.add_argument( '--threshold', type=float, required=False, help=( 'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.' 'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.' 
'Not needed for `l0`' ), ) parser.add_argument( '--model_name_or_path', type=str, required=True, help='Folder containing the model that was previously fine-pruned', ) parser.add_argument( '--target_model_path', default=None, type=str, required=False, help='Folder containing the model that was previously fine-pruned', ) a__ : List[Any] = parser.parse_args() main(args)
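The `l0` branch above is the hard-concrete stretch-and-clamp trick; a minimal sketch in plain PyTorch with illustrative shapes (the 4x4 tensors are placeholders, not values from the script):

import torch

l, r = -0.1, 1.1                      # stretch interval used by the script
scores = torch.randn(4, 4)            # stands in for a mask_scores tensor
weight = torch.randn(4, 4)            # stands in for the matching weight tensor
s = torch.sigmoid(scores)             # squash scores into (0, 1)
s_bar = s * (r - l) + l               # stretch into (-0.1, 1.1)
mask = s_bar.clamp(min=0.0, max=1.0)  # clamping makes exact 0s and 1s reachable
pruned_weight = weight * mask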
'''simple docstring''' import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json a__ : List[Any] = 'sshleifer/mar_enro_6_3_student' class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' def __snake_case ( self : Dict ): super().setUp() UpperCAmelCase = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=a__ , ) UpperCAmelCase = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k" @slow @require_torch_gpu def __snake_case ( self : Optional[int] ): MarianMTModel.from_pretrained(a__ ) @slow @require_torch_gpu def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script UpperCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase = bash_script.replace(a__ , str(a__ ) ) UpperCAmelCase = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") UpperCAmelCase = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split() # XXX: args.gpus > 1 : handle multi_gpu in the future UpperCAmelCase = ['''finetune.py'''] + bash_script.split() + args with patch.object(a__ , '''argv''' , a__ ): UpperCAmelCase = argparse.ArgumentParser() UpperCAmelCase = pl.Trainer.add_argparse_args(a__ ) UpperCAmelCase = SummarizationModule.add_model_specific_args(a__ , os.getcwd() ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = main(a__ ) # Check metrics UpperCAmelCase = load_json(model.metrics_save_path ) UpperCAmelCase = metrics['''val'''][0] UpperCAmelCase = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase = os.path.join(args.output_dir , a__ ) UpperCAmelCase = torch.load(a__ , map_location='''cpu''' ) UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase = {os.path.basename(a__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def __snake_case ( self : Any ): UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro" UpperCAmelCase = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 128, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script UpperCAmelCase = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase = bash_script.replace(a__ , str(a__ ) ) UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' ) UpperCAmelCase = 6 UpperCAmelCase = ( ['''distillation.py'''] + bash_script.split() + [ f"--output_dir={output_dir}", '''--gpus=1''', '''--learning_rate=1e-3''', f"--num_train_epochs={epochs}", '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(a__ , '''argv''' , a__ ): UpperCAmelCase = argparse.ArgumentParser() UpperCAmelCase = pl.Trainer.add_argparse_args(a__ ) UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() ) UpperCAmelCase = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu UpperCAmelCase = distill_main(a__ ) # Check metrics UpperCAmelCase = load_json(model.metrics_save_path ) UpperCAmelCase = metrics['''val'''][0] UpperCAmelCase = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase = os.path.join(args.output_dir , a__ ) UpperCAmelCase = torch.load(a__ , map_location='''cpu''' ) UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase = {os.path.basename(a__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent a__ : int = {'UserAgent': UserAgent().random} def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> dict: """simple docstring""" UpperCAmelCase = script.contents[0] UpperCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str , a__ : str ): UpperCAmelCase = f"https://www.instagram.com/{username}/" UpperCAmelCase = self.get_json() def __snake_case ( self : Any ): UpperCAmelCase = requests.get(self.url , headers=a__ ).text UpperCAmelCase = BeautifulSoup(a__ , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Tuple ): return f"{self.__class__.__name__}('{self.username}')" def __str__( self : Optional[int] ): return f"{self.fullname} ({self.username}) is {self.biography}" @property def __snake_case ( self : Tuple ): return self.user_data["username"] @property def __snake_case ( self : List[str] ): return self.user_data["full_name"] @property def __snake_case ( self : int ): return self.user_data["biography"] @property def __snake_case ( self : Tuple ): return self.user_data["business_email"] @property def __snake_case ( self : Union[str, Any] ): return self.user_data["external_url"] @property def __snake_case ( self : str ): return self.user_data["edge_followed_by"]["count"] @property def __snake_case ( self : List[Any] ): return self.user_data["edge_follow"]["count"] @property def __snake_case ( self : List[str] ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __snake_case ( self : List[str] ): return self.user_data["profile_pic_url_hd"] @property def __snake_case ( self : Union[str, Any] ): return self.user_data["is_verified"] @property def __snake_case ( self : int ): return self.user_data["is_private"] def __snake_case ( SCREAMING_SNAKE_CASE_ : str = "github" ) -> None: """simple docstring""" import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions UpperCAmelCase = InstagramUser(SCREAMING_SNAKE_CASE_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , SCREAMING_SNAKE_CASE_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() a__ : Optional[int] = InstagramUser('github') print(instagram_user) print(F"""{instagram_user.number_of_posts = }""") print(F"""{instagram_user.number_of_followers = }""") print(F"""{instagram_user.number_of_followings = }""") print(F"""{instagram_user.email = }""") print(F"""{instagram_user.website = }""") print(F"""{instagram_user.profile_picture_url = }""") print(F"""{instagram_user.is_verified = }""") print(F"""{instagram_user.is_private = }""")
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["image_processor", "tokenizer"] _lowerCamelCase ="CLIPImageProcessor" _lowerCamelCase =("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__( self : Union[str, Any] , a__ : List[str]=None , a__ : Union[str, Any]=None , **a__ : Optional[Any] ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__( self : Any , a__ : Any=None , a__ : str=None , a__ : List[Any]=None , **a__ : List[str] ): if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def __snake_case ( self : Optional[Any] , *a__ : int , **a__ : List[Any] ): return self.tokenizer.batch_decode(*a__ , **a__ ) def __snake_case ( self : Any , *a__ : Union[str, Any] , **a__ : Any ): return self.tokenizer.decode(*a__ , **a__ ) @property def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
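A minimal usage sketch for a processor of this shape, assuming `proc` is an instance of the processor class defined above (the image URL and prompt are illustrative):

import requests
from PIL import Image

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
# With both text and images, the call returns token ids plus pixel_values.
inputs = proc(text=["a photo of two cats"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']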
'''simple docstring'''
from __future__ import annotations


def __snake_case ( SCREAMING_SNAKE_CASE_ : list[float] ) -> float:
    """simple docstring"""
    UpperCAmelCase = 0.00
    UpperCAmelCase = 0
    for resistor in resistors:
        if resistor <= 0:
            UpperCAmelCase = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(SCREAMING_SNAKE_CASE_ )
        first_sum += 1 / float(SCREAMING_SNAKE_CASE_ )
        index += 1
    return 1 / first_sum


def __snake_case ( SCREAMING_SNAKE_CASE_ : list[float] ) -> float:
    """simple docstring"""
    UpperCAmelCase = 0.00
    UpperCAmelCase = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            UpperCAmelCase = f"Resistor at index {index} has a negative value!"
            raise ValueError(SCREAMING_SNAKE_CASE_ )
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
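A quick usage check of the two helpers above; the names `resistor_parallel` and `resistor_series` are placeholders for the obfuscated `__snake_case` definitions:

# 1/R = 1/4 + 1/4 + 1/2 = 1, so the parallel combination is 1 ohm;
# the series combination is simply the sum, 10 ohms.
resistors = [4.0, 4.0, 2.0]
print(resistor_parallel(resistors))  # 1.0
print(resistor_series(resistors))    # 10.0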
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a__ : List[str] = 16 a__ : int = 32 def __snake_case ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 16 ) -> int: """simple docstring""" UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE_ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase = datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE_ : str ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase = 8 else: UpperCAmelCase = None return tokenizer.pad( SCREAMING_SNAKE_CASE_ , padding='''longest''' , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders a__ : Optional[Any] = mocked_dataloaders # noqa: F811 def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> Any: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , SCREAMING_SNAKE_CASE_ ) == "1": UpperCAmelCase = 2 # Initialize accelerator UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase = config['''lr'''] UpperCAmelCase = int(config['''num_epochs'''] ) UpperCAmelCase = int(config['''seed'''] ) UpperCAmelCase = int(config['''batch_size'''] ) UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE UpperCAmelCase = MAX_GPU_BATCH_SIZE set_seed(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase, UpperCAmelCase = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ ) # Instantiate scheduler UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE_ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = outputs.loss UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() UpperCAmelCase = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = outputs.logits.argmax(dim=-1 ) UpperCAmelCase, UpperCAmelCase = accelerator.gather((predictions, batch['''labels''']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(SCREAMING_SNAKE_CASE_ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , ) UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , SCREAMING_SNAKE_CASE_ ) def __snake_case ( ) -> Optional[int]: """simple docstring""" UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
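A sketch of the shortcut the comment in the eval loop above mentions: `Accelerator.gather_for_metrics` performs the same last-batch truncation automatically, replacing the manual `samples_seen` bookkeeping:

# Drop-in replacement for the distributed-truncation logic above.
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(predictions=predictions, references=references)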
'''simple docstring''' import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCAmelCase__ : '''simple docstring''' @staticmethod def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ): pass def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> str: """simple docstring""" UpperCAmelCase = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> Dict: """simple docstring""" UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = npimg.shape return {"hash": hashimage(SCREAMING_SNAKE_CASE_ ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase =dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) _lowerCamelCase =dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def __snake_case ( self : Union[str, Any] , a__ : Optional[int] , a__ : Dict , a__ : int ): UpperCAmelCase = MaskGenerationPipeline(model=a__ , image_processor=a__ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def __snake_case ( self : int , a__ : Dict , a__ : Tuple ): pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def __snake_case ( self : str ): pass @slow @require_torch def __snake_case ( self : Optional[Any] ): UpperCAmelCase = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) UpperCAmelCase = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing UpperCAmelCase = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 
0.9_599}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871} ] , ) # fmt: on @require_torch @slow def __snake_case ( self : Dict ): UpperCAmelCase = '''facebook/sam-vit-huge''' UpperCAmelCase = pipeline('''mask-generation''' , model=a__ ) UpperCAmelCase = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing UpperCAmelCase = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053}, ] , )
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =ConsistencyModelPipeline _lowerCamelCase =UNCONDITIONAL_IMAGE_GENERATION_PARAMS _lowerCamelCase =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt _lowerCamelCase =frozenset( [ "num_inference_steps", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) @property def __snake_case ( self : List[str] ): UpperCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def __snake_case ( self : int ): UpperCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def __snake_case ( self : int , a__ : List[Any]=False ): if class_cond: UpperCAmelCase = self.dummy_cond_unet else: UpperCAmelCase = self.dummy_uncond_unet # Default to CM multistep sampler UpperCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def __snake_case ( self : Any , a__ : Optional[Any] , a__ : List[str]=0 ): if str(a__ ).startswith('''mps''' ): UpperCAmelCase = torch.manual_seed(a__ ) else: UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ ) UpperCAmelCase = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def __snake_case ( self : Optional[Any] ): UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = ConsistencyModelPipeline(**a__ ) UpperCAmelCase = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_dummy_inputs(a__ ) UpperCAmelCase = pipe(**a__ ).images assert image.shape == (1, 32, 32, 3) UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __snake_case ( self : int ): UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase = self.get_dummy_components(class_cond=a__ ) UpperCAmelCase = ConsistencyModelPipeline(**a__ ) UpperCAmelCase = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_dummy_inputs(a__ ) UpperCAmelCase = 0 UpperCAmelCase = pipe(**a__ ).images assert image.shape == (1, 32, 32, 3) UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __snake_case ( self : int ): UpperCAmelCase = '''cpu''' # ensure 
determinism for the device-dependent torch.Generator UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = ConsistencyModelPipeline(**a__ ) UpperCAmelCase = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_dummy_inputs(a__ ) UpperCAmelCase = 1 UpperCAmelCase = None UpperCAmelCase = pipe(**a__ ).images assert image.shape == (1, 32, 32, 3) UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __snake_case ( self : str ): UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase = self.get_dummy_components(class_cond=a__ ) UpperCAmelCase = ConsistencyModelPipeline(**a__ ) UpperCAmelCase = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_dummy_inputs(a__ ) UpperCAmelCase = 1 UpperCAmelCase = None UpperCAmelCase = 0 UpperCAmelCase = pipe(**a__ ).images assert image.shape == (1, 32, 32, 3) UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : Tuple ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : str , a__ : Optional[Any]=0 , a__ : List[str]=False , a__ : Tuple="cpu" , a__ : Optional[Any]=torch.floataa , a__ : List[Any]=(1, 3, 64, 64) ): UpperCAmelCase = torch.manual_seed(a__ ) UpperCAmelCase = { '''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: UpperCAmelCase = self.get_fixed_latents(seed=a__ , device=a__ , dtype=a__ , shape=a__ ) UpperCAmelCase = latents return inputs def __snake_case ( self : List[Any] , a__ : Optional[Any]=0 , a__ : int="cpu" , a__ : Any=torch.floataa , a__ : List[str]=(1, 3, 64, 64) ): if type(a__ ) == str: UpperCAmelCase = torch.device(a__ ) UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ ) UpperCAmelCase = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ ) return latents def __snake_case ( self : Tuple ): UpperCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) UpperCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) UpperCAmelCase = ConsistencyModelPipeline(unet=a__ , scheduler=a__ ) pipe.to(torch_device=a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_inputs() UpperCAmelCase = pipe(**a__ ).images assert image.shape == (1, 64, 64, 3) UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def __snake_case ( self : Optional[Any] ): UpperCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) UpperCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) UpperCAmelCase = ConsistencyModelPipeline(unet=a__ , scheduler=a__ ) pipe.to(torch_device=a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = 
self.get_inputs() UpperCAmelCase = 1 UpperCAmelCase = None UpperCAmelCase = pipe(**a__ ).images assert image.shape == (1, 64, 64, 3) UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) UpperCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) UpperCAmelCase = ConsistencyModelPipeline(unet=a__ , scheduler=a__ ) pipe.to(torch_device=a__ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_inputs(get_fixed_latents=a__ , device=a__ ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=a__ , enable_math=a__ , enable_mem_efficient=a__ ): UpperCAmelCase = pipe(**a__ ).images assert image.shape == (1, 64, 64, 3) UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def __snake_case ( self : Any ): UpperCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) UpperCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) UpperCAmelCase = ConsistencyModelPipeline(unet=a__ , scheduler=a__ ) pipe.to(torch_device=a__ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase = self.get_inputs(get_fixed_latents=a__ , device=a__ ) UpperCAmelCase = 1 UpperCAmelCase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=a__ , enable_math=a__ , enable_mem_efficient=a__ ): UpperCAmelCase = pipe(**a__ ).images assert image.shape == (1, 64, 64, 3) UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL a__ : Tuple = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(SCREAMING_SNAKE_CASE_ ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["pixel_values"] def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ): super().__init__(**a__ ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = offset UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ): UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) if "shortest_edge" in size: UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ ) elif "height" in size and "width" in size: UpperCAmelCase = (size['''height'''], size['''width''']) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" ) return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): UpperCAmelCase = get_size_dict(a__ ) if "height" not in size or "width" not in size: raise ValueError(f"Size must have 'height' and 'width' as keys. 
Got {size.keys()}" ) return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ ) def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ): UpperCAmelCase = image.astype(np.floataa ) if offset: UpperCAmelCase = image - (scale / 2) return rescale(a__ , scale=a__ , data_format=a__ , **a__ ) def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase = to_numpy_array(a__ ) if do_resize: UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ ) if do_center_crop: UpperCAmelCase = self.center_crop(a__ , size=a__ ) if do_rescale: UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ ) if do_normalize: UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ ) UpperCAmelCase = to_channel_dimension_format(a__ , a__ ) return image def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = offset if offset is not None else self.offset UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) if not valid_images(a__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase = make_batched(a__ ) UpperCAmelCase = [ [ self._preprocess_image( image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , ) for img in video ] for video in videos ] UpperCAmelCase = {'''pixel_values''': videos} return BatchFeature(data=a__ , tensor_type=a__ )
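A minimal usage sketch for the video processor above, assuming `video_processor` is an instance constructed with the defaults (the dummy frames are illustrative):

import numpy as np

# An 8-frame RGB "video" as a list of HxWxC uint8 arrays.
video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
batch = video_processor(video, return_tensors="np")
# Shortest edge resized to 256, center-cropped to 224x224, channels-first:
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)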
'''simple docstring''' import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , a__ : Tuple , a__ : Optional[Any]=13 , a__ : Tuple=7 , a__ : Dict=True , a__ : Dict=True , a__ : List[Any]=False , a__ : int=True , a__ : Optional[Any]=99 , a__ : Union[str, Any]=64 , a__ : Optional[int]=5 , a__ : Union[str, Any]=4 , a__ : Any=64 , a__ : Optional[int]="gelu" , a__ : Optional[int]=0.1 , a__ : Dict=0.1 , a__ : Optional[Any]=512 , a__ : Optional[Any]=16 , a__ : Dict=2 , a__ : int=0.02 , a__ : int=3 , a__ : str=4 , a__ : List[str]=None , ): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope def __snake_case ( self : List[str] ): return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def __snake_case ( self : str ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __snake_case ( self : Union[str, Any] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __snake_case ( self : List[Any] , a__ : Dict , a__ : List[Any] , a__ : List[Any] , a__ : List[str] , a__ : str , a__ : str ): UpperCAmelCase = MPNetModel(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, 
self.hidden_size) ) def __snake_case ( self : str , a__ : Tuple , a__ : List[str] , a__ : int , a__ : List[Any] , a__ : List[Any] , a__ : Union[str, Any] ): UpperCAmelCase = MPNetForQuestionAnswering(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model( a__ , attention_mask=a__ , start_positions=a__ , end_positions=a__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __snake_case ( self : Union[str, Any] , a__ : Any , a__ : Optional[int] , a__ : Any , a__ : Union[str, Any] , a__ : int , a__ : Optional[Any] ): UpperCAmelCase = self.num_labels UpperCAmelCase = MPNetForSequenceClassification(a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model(a__ , attention_mask=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self : Any , a__ : int , a__ : Any , a__ : str , a__ : Dict , a__ : Tuple , a__ : List[Any] ): UpperCAmelCase = self.num_choices UpperCAmelCase = MPNetForMultipleChoice(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = model( a__ , attention_mask=a__ , labels=a__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __snake_case ( self : List[str] , a__ : Dict , a__ : List[Any] , a__ : List[Any] , a__ : int , a__ : Dict , a__ : Any ): UpperCAmelCase = self.num_labels UpperCAmelCase = MPNetForTokenClassification(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model(a__ , attention_mask=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case ( self : str ): UpperCAmelCase = self.prepare_config_and_inputs() ((UpperCAmelCase), (UpperCAmelCase), (UpperCAmelCase), (UpperCAmelCase), (UpperCAmelCase), (UpperCAmelCase)) = config_and_inputs UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) _lowerCamelCase =( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase =False _lowerCamelCase =True def __snake_case ( self : Optional[int] ): UpperCAmelCase = MPNetModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=a__ , hidden_size=37 ) def __snake_case ( self : Optional[int] ): self.config_tester.run_common_tests() def __snake_case ( self : List[str] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*a__ ) def __snake_case ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*a__ ) def __snake_case ( self : Any ): UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*a__ ) def __snake_case ( self : int ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*a__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __snake_case ( self : Tuple ): UpperCAmelCase = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) UpperCAmelCase = model(a__ )[0] UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , a__ ) UpperCAmelCase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
51
'''simple docstring'''

import torch
from transformers import AutoModel


class lowerCAmelCase__ ( torch.nn.Module ):
    '''simple docstring'''

    def __init__( self : Tuple , a__ : List[str]="sayef/fsner-bert-base-uncased" ):
        super().__init__()
        UpperCAmelCase = AutoModel.from_pretrained(a__ , return_dict=a__ )
        UpperCAmelCase = torch.nn.CosineSimilarity(3 , 1e-0_8 )
        UpperCAmelCase = torch.nn.Softmax(dim=1 )

    def __snake_case ( self : List[Any] , **a__ : Tuple ):
        return self.bert(**a__ ).last_hidden_state

    def __snake_case ( self : int , a__ : List[str] ):
        return token_embeddings.sum(2 , keepdim=a__ )

    def __snake_case ( self : str , a__ : str , a__ : str , a__ : int=1 ):
        return self.softmax(T * self.cos(a__ , a__ ) )

    def __snake_case ( self : Tuple , a__ : Tuple , a__ : str ):
        UpperCAmelCase = W_supports['''sizes'''].tolist()
        UpperCAmelCase = W_supports['''start_token_id'''].item()
        UpperCAmelCase = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        UpperCAmelCase = self.BERT(**a__ )
        UpperCAmelCase = self.BERT(**a__ )
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = W_supports['''input_ids'''] == start_token_id
        UpperCAmelCase = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(a__ ):
            if i == 0:
                UpperCAmelCase = 0
            else:
                UpperCAmelCase = support_sizes[i - 1]
            UpperCAmelCase = S[s : s + size][start_token_masks[s : s + size]]
            UpperCAmelCase = S[s : s + size][end_token_masks[s : s + size]]
            UpperCAmelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            UpperCAmelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                UpperCAmelCase = torch.vstack((p_starts, p_start) )
                UpperCAmelCase = torch.vstack((p_ends, p_end) )
            else:
                UpperCAmelCase = p_start
                UpperCAmelCase = p_end
        return p_starts, p_ends
51
1
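The MPNet integration test above checks the last-hidden-state shape and a value slice for microsoft/mpnet-base; a minimal standalone sketch of the same shape check, assuming the checkpoint is reachable from the Hub:

import torch
from transformers import MPNetModel

# Feed the same fixed token ids the slow test uses and inspect the output shape.
model = MPNetModel.from_pretrained("microsoft/mpnet-base")
model.eval()
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    output = model(input_ids)[0]
print(output.shape)  # torch.Size([1, 11, 768])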
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str , a__ : str , a__ : Optional[Any]=2 , a__ : Dict=8 , a__ : Optional[Any]=True , a__ : List[Any]=True , a__ : Optional[Any]=True , a__ : Optional[Any]=True , a__ : Optional[Any]=99 , a__ : Dict=16 , a__ : Optional[int]=5 , a__ : str=2 , a__ : List[Any]=36 , a__ : Optional[Any]="gelu" , a__ : List[str]=0.0 , a__ : Union[str, Any]=0.0 , a__ : int=512 , a__ : str=16 , a__ : str=2 , a__ : Any=0.02 , a__ : Tuple=3 , a__ : Any=4 , a__ : int=None , ): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope def __snake_case ( self : Optional[int] ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __snake_case ( self : Any ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , ) def __snake_case ( self : Tuple ): UpperCAmelCase = self.get_config() UpperCAmelCase = 300 return config def __snake_case ( self : Optional[Any] ): ( ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ) = self.prepare_config_and_inputs() UpperCAmelCase 
= True UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __snake_case ( self : Tuple , a__ : str , a__ : Tuple , a__ : Dict , a__ : Any , a__ : Any , a__ : Tuple , a__ : Tuple ): UpperCAmelCase = MraModel(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model(a__ , attention_mask=a__ , token_type_ids=a__ ) UpperCAmelCase = model(a__ , token_type_ids=a__ ) UpperCAmelCase = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case ( self : Optional[Any] , a__ : int , a__ : str , a__ : Optional[Any] , a__ : Optional[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : Optional[int] , a__ : Union[str, Any] , a__ : str , ): UpperCAmelCase = True UpperCAmelCase = MraModel(a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model( a__ , attention_mask=a__ , token_type_ids=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , ) UpperCAmelCase = model( a__ , attention_mask=a__ , token_type_ids=a__ , encoder_hidden_states=a__ , ) UpperCAmelCase = model(a__ , attention_mask=a__ , token_type_ids=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case ( self : Dict , a__ : List[str] , a__ : Tuple , a__ : List[str] , a__ : Union[str, Any] , a__ : Any , a__ : int , a__ : List[Any] ): UpperCAmelCase = MraForMaskedLM(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __snake_case ( self : List[str] , a__ : str , a__ : int , a__ : List[str] , a__ : Optional[int] , a__ : Optional[Any] , a__ : List[Any] , a__ : List[str] ): UpperCAmelCase = MraForQuestionAnswering(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model( a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __snake_case ( self : Dict , a__ : Dict , a__ : int , a__ : Optional[int] , a__ : Dict , a__ : List[Any] , a__ : Any , a__ : Any ): UpperCAmelCase = self.num_labels UpperCAmelCase = MraForSequenceClassification(a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self : Optional[Any] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Dict , a__ : Optional[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : Union[str, Any] ): UpperCAmelCase = self.num_labels UpperCAmelCase = MraForTokenClassification(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case ( self : Optional[int] , a__ : Any , a__ : int , a__ : Any , a__ : int , a__ : str , a__ : Any , a__ : Any ): UpperCAmelCase = self.num_choices UpperCAmelCase = MraForMultipleChoice(config=a__ ) 
model.to(a__ ) model.eval() UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = model( a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ) = config_and_inputs UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =() def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = MraModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=a__ , hidden_size=37 ) def __snake_case ( self : Dict ): self.config_tester.run_common_tests() def __snake_case ( self : Optional[int] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase = type self.model_tester.create_and_check_model(*a__ ) def __snake_case ( self : int ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a__ ) def __snake_case ( self : Optional[Any] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*a__ ) def __snake_case ( self : int ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) def __snake_case ( self : List[str] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a__ ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a__ ) @slow def __snake_case ( self : str ): for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = MraModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @unittest.skip(reason='''MRA does not output attentions''' ) def __snake_case ( self : int ): return @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __snake_case ( self : str ): UpperCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) UpperCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): UpperCAmelCase = model(a__ )[0] UpperCAmelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , a__ ) UpperCAmelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) 
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) ) @slow def __snake_case ( self : Tuple ): UpperCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) UpperCAmelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): UpperCAmelCase = model(a__ )[0] UpperCAmelCase = 50265 UpperCAmelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , a__ ) UpperCAmelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) ) @slow def __snake_case ( self : Any ): UpperCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) UpperCAmelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): UpperCAmelCase = model(a__ )[0] UpperCAmelCase = 50265 UpperCAmelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , a__ ) UpperCAmelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
51
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =(EulerDiscreteScheduler,) _lowerCamelCase =10 def __snake_case ( self : str , **a__ : Tuple ): UpperCAmelCase = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**a__ ) return config def __snake_case ( self : Dict ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=a__ ) def __snake_case ( self : Optional[int] ): for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=a__ , beta_end=a__ ) def __snake_case ( self : List[str] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=a__ ) def __snake_case ( self : int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**a__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase = sample.to(a__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase = scheduler.scale_model_input(a__ , a__ ) UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ) UpperCAmelCase = output.prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 10.0_807 ) < 1e-2 assert abs(result_mean.item() - 0.0_131 ) < 1e-3 def __snake_case ( self : str ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCAmelCase = scheduler_class(**a__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase = sample.to(a__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase = scheduler.scale_model_input(a__ , a__ ) UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ) UpperCAmelCase = output.prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 0.0_002 ) < 1e-2 assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3 def __snake_case ( self : Optional[int] ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**a__ ) scheduler.set_timesteps(self.num_inference_steps , device=a__ ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCAmelCase = sample.to(a__ ) for t in scheduler.timesteps: UpperCAmelCase = scheduler.scale_model_input(a__ , a__ ) UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ) UpperCAmelCase = output.prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 10.0_807 ) < 
1e-2 assert abs(result_mean.item() - 0.0_131 ) < 1e-3 def __snake_case ( self : str ): UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**a__ , use_karras_sigmas=a__ ) scheduler.set_timesteps(self.num_inference_steps , device=a__ ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCAmelCase = sample.to(a__ ) for t in scheduler.timesteps: UpperCAmelCase = scheduler.scale_model_input(a__ , a__ ) UpperCAmelCase = model(a__ , a__ ) UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ) UpperCAmelCase = output.prev_sample UpperCAmelCase = torch.sum(torch.abs(a__ ) ) UpperCAmelCase = torch.mean(torch.abs(a__ ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
51
1
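The scheduler tests above all follow the same scale-input / model / step loop; a minimal sketch of that loop, with a zero tensor standing in for a real UNet (the stand-in and the 8x8 sample size are assumptions for illustration, not part of the test suite):

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a model call
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.abs().mean())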
'''simple docstring''' from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract a__ : Optional[int] = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]: """simple docstring""" return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def __snake_case ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Optional[str] , SCREAMING_SNAKE_CASE_ : Optional[str] ) -> int: """simple docstring""" UpperCAmelCase = to_pil_image(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase, UpperCAmelCase = pil_image.size UpperCAmelCase = pytesseract.image_to_data(SCREAMING_SNAKE_CASE_ , lang=SCREAMING_SNAKE_CASE_ , output_type='''dict''' , config=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates UpperCAmelCase = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE_ ) if not word.strip()] UpperCAmelCase = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] UpperCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] UpperCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] UpperCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] UpperCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCAmelCase = [] for x, y, w, h in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCAmelCase = [x, y, x + w, y + h] actual_boxes.append(SCREAMING_SNAKE_CASE_ ) # finally, normalize the bounding boxes UpperCAmelCase = [] for box in actual_boxes: normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["pixel_values"] def __init__( self : Union[str, Any] , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : float = 1 / 255 , a__ : bool = True , a__ : Union[float, Iterable[float]] = None , a__ : Union[float, Iterable[float]] = None , a__ : bool = True , a__ : Optional[str] = None , a__ : Optional[str] = "" , **a__ : str , ): super().__init__(**a__ ) UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224} 
UpperCAmelCase = get_size_dict(a__ ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_value UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD UpperCAmelCase = apply_ocr UpperCAmelCase = ocr_lang UpperCAmelCase = tesseract_config def __snake_case ( self : Any , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Union[str, Any] , ): UpperCAmelCase = get_size_dict(a__ ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" ) UpperCAmelCase = (size['''height'''], size['''width''']) return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Tuple , a__ : np.ndarray , a__ : Union[int, float] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): return rescale(a__ , scale=a__ , data_format=a__ , **a__ ) def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, Iterable[float]] , a__ : Union[float, Iterable[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ): return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ ) def __snake_case ( self : str , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : str=None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : Union[float, Iterable[float]] = None , a__ : Union[float, Iterable[float]] = None , a__ : bool = None , a__ : Optional[str] = None , a__ : Optional[str] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(a__ ) UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCAmelCase = make_list_of_images(a__ ) if not valid_images(a__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase = [to_numpy_array(a__ ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) UpperCAmelCase = [] UpperCAmelCase = [] for image in images: UpperCAmelCase, UpperCAmelCase = apply_tesseract(a__ , a__ , a__ ) words_batch.append(a__ ) boxes_batch.append(a__ ) if do_resize: UpperCAmelCase = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=a__ , scale=a__ ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images] UpperCAmelCase = [to_channel_dimension_format(a__ , a__ ) for image in images] UpperCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=a__ ) if apply_ocr: UpperCAmelCase = words_batch UpperCAmelCase = boxes_batch return data
51
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : int=None ) -> Any: """simple docstring""" return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ ) @dataclass class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =field( metadata={"help": "The csv file to plot."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Disable logarithmic scale when plotting"} , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={ "help": "Whether the csv file has training results or inference results. Defaults to inference results." } , ) _lowerCamelCase =field( default=UpperCAmelCase_ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , ) _lowerCamelCase =list_field( default=UpperCAmelCase_ , metadata={"help": "List of model names that are used instead of the ones in the csv file."} ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: """simple docstring""" try: int(SCREAMING_SNAKE_CASE_ ) return True except ValueError: return False def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> str: """simple docstring""" try: float(SCREAMING_SNAKE_CASE_ ) return True except ValueError: return False class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Dict , a__ : Optional[int] ): UpperCAmelCase = args UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='''''' ) as csv_file: UpperCAmelCase = csv.DictReader(a__ ) for row in reader: UpperCAmelCase = row['''model'''] self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) ) self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) ) if can_convert_to_int(row['''result'''] ): # value is not None UpperCAmelCase = int(row['''result'''] ) elif can_convert_to_float(row['''result'''] ): # value is not None UpperCAmelCase = float(row['''result'''] ) def __snake_case ( self : Dict ): UpperCAmelCase, UpperCAmelCase = plt.subplots() UpperCAmelCase = '''Time usage''' if self.args.is_time else '''Memory usage''' UpperCAmelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference''' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('''log''' ) ax.set_yscale('''log''' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): UpperCAmelCase = sorted(set(self.result_dict[model_name]['''bsz'''] ) ) UpperCAmelCase = sorted(set(self.result_dict[model_name]['''seq_len'''] ) ) UpperCAmelCase = self.result_dict[model_name]['''result'''] ((UpperCAmelCase), (UpperCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) UpperCAmelCase = ( model_name if self.args.short_model_names is None else 
self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: UpperCAmelCase = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=a__ , ) else: UpperCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((UpperCAmelCase), (UpperCAmelCase)) = ( ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''') ) UpperCAmelCase = np.asarray(a__ , a__ )[: len(a__ )] plt.scatter( a__ , a__ , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" ) plt.plot(a__ , a__ , '''--''' ) title_str += f" {label_model_name} vs." UpperCAmelCase = title_str[:-4] UpperCAmelCase = '''Time in s''' if self.args.is_time else '''Memory in MB''' # plot plt.title(a__ ) plt.xlabel(a__ ) plt.ylabel(a__ ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __snake_case ( ) -> Tuple: """simple docstring""" UpperCAmelCase = HfArgumentParser(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = parser.parse_args_into_dataclasses()[0] UpperCAmelCase = Plot(args=SCREAMING_SNAKE_CASE_ ) plot.plot() if __name__ == "__main__": main()
51
1
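The OCR path above maps pixel boxes into the 0-1000 coordinate space that LayoutLM-style models expect; the transform in isolation, applied to a made-up box:

def normalize_box(box, width, height):
    # Scale (left, top, right, bottom) pixel coordinates to a 0-1000 grid.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([10, 20, 110, 220], width=1000, height=2000))  # [10, 10, 110, 110]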
'''simple docstring'''

import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=UpperCAmelCase_ )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
    '''simple docstring'''

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    _lowerCamelCase =field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    _lowerCamelCase =Features({"text": Value("string" )} )
    _lowerCamelCase =Features({"labels": ClassLabel} )
    _lowerCamelCase ="text"
    _lowerCamelCase ="labels"

    def __snake_case ( self : str , a__ : str ):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , a__ ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        UpperCAmelCase = copy.deepcopy(self )
        UpperCAmelCase = self.label_schema.copy()
        UpperCAmelCase = features[self.label_column]
        UpperCAmelCase = label_schema
        return task_template

    @property
    def __snake_case ( self : Dict ):
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
51
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() a__ : Tuple = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]: """simple docstring""" print('''Loading config file...''' ) def flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]="" , SCREAMING_SNAKE_CASE_ : Dict="." ): UpperCAmelCase = [] for k, v in d.items(): UpperCAmelCase = parent_key + sep + k if parent_key else k if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sep=SCREAMING_SNAKE_CASE_ ).items() ) else: items.append((new_key, v) ) return dict(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = argparse.Namespace() with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as yaml_file: try: UpperCAmelCase = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=yaml.FullLoader ) UpperCAmelCase = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ ) for k, v in flat_cfg.items(): setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) ) ) return config def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: """simple docstring""" UpperCAmelCase = MobileViTVaConfig() UpperCAmelCase = False # dataset if task_name.startswith('''imagenet1k_''' ): UpperCAmelCase = 1_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase = 384 else: UpperCAmelCase = 256 UpperCAmelCase = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): UpperCAmelCase = 21_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase = 384 else: UpperCAmelCase = 256 UpperCAmelCase = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): UpperCAmelCase = 151 UpperCAmelCase = 512 UpperCAmelCase = '''ade20k-id2label.json''' UpperCAmelCase = True elif task_name.startswith('''voc_''' ): UpperCAmelCase = 21 UpperCAmelCase = 512 UpperCAmelCase = '''pascal-voc-id2label.json''' UpperCAmelCase = True # orig_config UpperCAmelCase = load_orig_config_file(SCREAMING_SNAKE_CASE_ ) assert getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , 
'''model.segmentation.deeplabv3.aspp_out_channels''' , 512 ) UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: """simple docstring""" UpperCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = val def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=False ) -> int: """simple docstring""" if base_model: UpperCAmelCase = '''''' else: UpperCAmelCase = '''mobilevitv2.''' UpperCAmelCase = [] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCAmelCase = k[8:] else: UpperCAmelCase = k if ".block." in k: UpperCAmelCase = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: UpperCAmelCase = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: UpperCAmelCase = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: UpperCAmelCase = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." ) for i in [1, 2]: if f"layer_{i}." in k: UpperCAmelCase = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: UpperCAmelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: UpperCAmelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f"layer_{i}.0." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if f"layer_{i}.1.local_rep.0." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if f"layer_{i}.1.local_rep.1." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: UpperCAmelCase = [0, 1] elif i == 4: UpperCAmelCase = [0, 1, 2, 3] elif i == 5: UpperCAmelCase = [0, 1, 2] for j in j_in: if f"layer_{i}.1.global_rep.{j}." in k: UpperCAmelCase = k_new.replace( f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if f"layer_{i}.1.global_rep.{j+1}." in k: UpperCAmelCase = k_new.replace( f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." ) if f"layer_{i}.1.conv_proj." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: UpperCAmelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: UpperCAmelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: UpperCAmelCase = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: UpperCAmelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." 
in k: UpperCAmelCase = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: UpperCAmelCase = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> int: """simple docstring""" UpperCAmelCase = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(SCREAMING_SNAKE_CASE_ ) for k in keys_to_ignore: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __snake_case ( ) -> List[Any]: """simple docstring""" UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple: """simple docstring""" UpperCAmelCase = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load original state_dict UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): UpperCAmelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval() UpperCAmelCase = False else: UpperCAmelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval() UpperCAmelCase = False # remove and rename some keys of load the original model UpperCAmelCase = checkpoint remove_unused_keys(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load modified state_dict model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) # verify classification model if task_name.startswith('''imagenet''' ): UpperCAmelCase = outputs.logits UpperCAmelCase = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCAmelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ) assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='imagenet1k_256', type=str, help=( 'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
' '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n ' ), choices=[ 'imagenet1k_256', 'imagenet1k_384', 'imagenet21k_to_1k_256', 'imagenet21k_to_1k_384', 'ade20k_deeplabv3', 'voc_deeplabv3', ], ) parser.add_argument( '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.') parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) a__ : str = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
51
1
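The conversion script above rewrites checkpoint keys through (source, destination) pairs; a toy sketch of that rename pass on a hypothetical two-entry state dict:

import torch

state_dict = {"conv_1.weight": torch.zeros(3), "classifier.1.bias": torch.zeros(2)}
rename_keys = [
    ("conv_1.weight", "mobilevitv2.conv_stem.weight"),
    ("classifier.1.bias", "classifier.bias"),
]
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)  # move the tensor under its new key
print(sorted(state_dict))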
'''simple docstring'''

from pathlib import Path

import fire


def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ) -> int:
    """simple docstring"""
    UpperCAmelCase = Path(SCREAMING_SNAKE_CASE_ )
    UpperCAmelCase = Path(SCREAMING_SNAKE_CASE_ )
    dest_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
    for path in src_dir.iterdir():
        UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
        UpperCAmelCase = dest_dir.joinpath(path.name )
        print(SCREAMING_SNAKE_CASE_ )
        dest_path.open('''w''' ).write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )


if __name__ == "__main__":
    fire.Fire(minify)
51
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a__ : Union[str, Any] = logging.get_logger(__name__)

a__ : Optional[int] = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class lowerCAmelCase__ ( UpperCAmelCase_ ):
    '''simple docstring'''

    _lowerCamelCase ="megatron-bert"

    def __init__( self : Dict , a__ : Union[str, Any]=29056 , a__ : Dict=1024 , a__ : str=24 , a__ : Any=16 , a__ : Tuple=4096 , a__ : Optional[int]="gelu" , a__ : Tuple=0.1 , a__ : Tuple=0.1 , a__ : Any=512 , a__ : Optional[Any]=2 , a__ : str=0.02 , a__ : Optional[int]=1e-1_2 , a__ : Union[str, Any]=0 , a__ : Optional[Any]="absolute" , a__ : Dict=True , **a__ : Dict , ):
        super().__init__(pad_token_id=a__ , **a__ )
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = hidden_act
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = layer_norm_eps
        UpperCAmelCase = position_embedding_type
        UpperCAmelCase = use_cache
51
1
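The configuration class above only stores hyperparameters; instantiating it with the defaults shown is enough to inspect them, assuming a transformers install that ships MegatronBertConfig:

from transformers import MegatronBertConfig

config = MegatronBertConfig()  # defaults: vocab_size=29056, hidden_size=1024, 24 layers
print(config.vocab_size, config.num_hidden_layers, config.position_embedding_type)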
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ) -> Any: """simple docstring""" assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match" UpperCAmelCase = nn.Parameter(SCREAMING_SNAKE_CASE_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match" UpperCAmelCase = nn.Parameter(SCREAMING_SNAKE_CASE_ ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase = np.asarray(weights[0] ) UpperCAmelCase = np.asarray(weights[1] ) UpperCAmelCase = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).view(-1 , SCREAMING_SNAKE_CASE_ ).contiguous().transpose(0 , 1 ) , ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Any: """simple docstring""" UpperCAmelCase = np.asarray(weights[0] ) UpperCAmelCase = np.asarray(weights[1] ) UpperCAmelCase = np.asarray(weights[2] ) UpperCAmelCase = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).view(-1 , SCREAMING_SNAKE_CASE_ ).contiguous().transpose(0 , 1 ) , ) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: """simple docstring""" UpperCAmelCase = weights[0][0][0] UpperCAmelCase = np.asarray(layer_norm_a[0] ) UpperCAmelCase = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) # lsh weights + output UpperCAmelCase = weights[0][1] if len(SCREAMING_SNAKE_CASE_ ) < 4: set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE_ , torch_block.attention , SCREAMING_SNAKE_CASE_ ) else: set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE_ , torch_block.attention , SCREAMING_SNAKE_CASE_ ) # intermediate weighs UpperCAmelCase = weights[2][0][1][2] # Chunked Feed Forward if len(SCREAMING_SNAKE_CASE_ ) == 4: UpperCAmelCase = intermediate_weights[2] # layernorm 2 UpperCAmelCase = np.asarray(intermediate_weights[0][0] ) UpperCAmelCase = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) # intermediate dense 
UpperCAmelCase = np.asarray(intermediate_weights[1][0] ) UpperCAmelCase = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) # intermediate out UpperCAmelCase = np.asarray(intermediate_weights[4][0] ) UpperCAmelCase = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase = torch_model.reformer # word embeds UpperCAmelCase = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) if isinstance(weights[3] , SCREAMING_SNAKE_CASE_ ): UpperCAmelCase = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): UpperCAmelCase = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"{position_embeddings[emb_idx]} emb does not match" UpperCAmelCase = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( SCREAMING_SNAKE_CASE_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): UpperCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # output layer norm UpperCAmelCase = np.asarray(weights[7][0] ) UpperCAmelCase = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) # output embeddings UpperCAmelCase = np.asarray(weights[9][0] ) UpperCAmelCase = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: """simple docstring""" UpperCAmelCase = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(f"Building PyTorch model from configuration: {config}" ) UpperCAmelCase = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f: UpperCAmelCase = pickle.load(SCREAMING_SNAKE_CASE_ )['''weights'''] set_model_weights_in_torch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config.hidden_size ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": a__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) a__ : Union[str, Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
51
'''simple docstring'''

from __future__ import annotations

a__ : List[str] = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}


class lowerCAmelCase__ :
    '''simple docstring'''

    def __init__( self : Optional[int] , a__ : dict[str, list[str]] , a__ : str ):
        UpperCAmelCase = graph
        # mapping node to its parent in resulting breadth first tree
        UpperCAmelCase = {}
        UpperCAmelCase = source_vertex

    def __snake_case ( self : Optional[int] ):
        UpperCAmelCase = {self.source_vertex}
        UpperCAmelCase = None
        UpperCAmelCase = [self.source_vertex]  # first in first out queue
        while queue:
            UpperCAmelCase = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(a__ )
                    UpperCAmelCase = vertex
                    queue.append(a__ )

    def __snake_case ( self : Any , a__ : str ):
        if target_vertex == self.source_vertex:
            return self.source_vertex
        UpperCAmelCase = self.parent.get(a__ )
        if target_vertex_parent is None:
            UpperCAmelCase = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(a__ )
        return self.shortest_path(a__ ) + f"->{target_vertex}"


if __name__ == "__main__":
    a__ : Tuple = Graph(graph, 'G')
    g.breath_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
51
1
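The breadth-first class above records each vertex's parent and reconstructs paths from that map; a plain-function sketch of the same parent-map idea on a small made-up graph:

from collections import deque

def bfs_parents(graph, source):
    # Map each reachable vertex to its parent in the BFS tree.
    parents = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbour in graph[vertex]:
            if neighbour not in parents:
                parents[neighbour] = vertex
                queue.append(neighbour)
    return parents

graph = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
print(bfs_parents(graph, "A"))  # {'A': None, 'B': 'A', 'C': 'A', 'D': 'B'}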
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =42 _lowerCamelCase =None _lowerCamelCase =None def __snake_case ( ) -> Node | None: """simple docstring""" UpperCAmelCase = Node(1 ) UpperCAmelCase = Node(2 ) UpperCAmelCase = Node(3 ) UpperCAmelCase = Node(4 ) UpperCAmelCase = Node(5 ) return tree def __snake_case ( SCREAMING_SNAKE_CASE_ : Node | None ) -> list[int]: """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def __snake_case ( SCREAMING_SNAKE_CASE_ : Node | None ) -> list[int]: """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def __snake_case ( SCREAMING_SNAKE_CASE_ : Node | None ) -> list[int]: """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def __snake_case ( SCREAMING_SNAKE_CASE_ : Node | None ) -> int: """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def __snake_case ( SCREAMING_SNAKE_CASE_ : Node | None ) -> Sequence[Node | None]: """simple docstring""" UpperCAmelCase = [] if root is None: return output UpperCAmelCase = deque([root] ) while process_queue: UpperCAmelCase = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def __snake_case ( SCREAMING_SNAKE_CASE_ : Node | None , SCREAMING_SNAKE_CASE_ : int ) -> Sequence[Node | None]: """simple docstring""" UpperCAmelCase = [] def populate_output(SCREAMING_SNAKE_CASE_ : Node | None , SCREAMING_SNAKE_CASE_ : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return output def __snake_case ( SCREAMING_SNAKE_CASE_ : Node | None , SCREAMING_SNAKE_CASE_ : int ) -> Sequence[Node | None]: """simple docstring""" UpperCAmelCase = [] def populate_output(SCREAMING_SNAKE_CASE_ : Node | None , SCREAMING_SNAKE_CASE_ : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return output def __snake_case ( SCREAMING_SNAKE_CASE_ : Node | None ) -> Sequence[Node | None] | list[Any]: """simple docstring""" if root is None: return [] UpperCAmelCase = [] UpperCAmelCase = 0 UpperCAmelCase = height(SCREAMING_SNAKE_CASE_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase = 1 else: output.append(get_nodes_from_right_to_left(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase = 0 return output def __snake_case ( ) -> None: # Main function for testing. 
"""simple docstring""" UpperCAmelCase = make_tree() print(f"In-order Traversal: {inorder(SCREAMING_SNAKE_CASE_ )}" ) print(f"Pre-order Traversal: {preorder(SCREAMING_SNAKE_CASE_ )}" ) print(f"Post-order Traversal: {postorder(SCREAMING_SNAKE_CASE_ )}" , '''\n''' ) print(f"Height of Tree: {height(SCREAMING_SNAKE_CASE_ )}" , '''\n''' ) print('''Complete Level Order Traversal: ''' ) print(level_order(SCREAMING_SNAKE_CASE_ ) , '''\n''' ) print('''Level-wise order Traversal: ''' ) for level in range(1 , height(SCREAMING_SNAKE_CASE_ ) + 1 ): print(f"Level {level}:" , get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE_ , level=SCREAMING_SNAKE_CASE_ ) ) print('''\nZigZag order Traversal: ''' ) print(zigzag(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
51
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule a__ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
51
1
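The tree-traversal cell in the row above defines preorder/inorder/postorder plus level-order and zigzag walks over a small `Node` dataclass. The queue-based level-order walk in isolation, with hypothetical names:

```python
from collections import deque
from dataclasses import dataclass

@dataclass
class Node:
    data: int
    left: "Node | None" = None
    right: "Node | None" = None

def level_order(root: "Node | None") -> list[int]:
    """Breadth-first traversal: visit nodes level by level, left to right."""
    output: list[int] = []
    queue = deque([root] if root else [])
    while queue:
        node = queue.popleft()
        output.append(node.data)
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
    return output

tree = Node(1, Node(2, Node(4), Node(5)), Node(3))
assert level_order(tree) == [1, 2, 3, 4, 5]
```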
'''simple docstring''' class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , a__ : List[str] , a__ : str ): UpperCAmelCase = name UpperCAmelCase = val def __str__( self : Optional[Any] ): return f"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self : Union[str, Any] , a__ : Union[str, Any] ): return self.val < other.val class lowerCAmelCase__ : '''simple docstring''' def __init__( self : List[Any] , a__ : Union[str, Any] ): UpperCAmelCase = {} UpperCAmelCase = {} UpperCAmelCase = self.build_heap(a__ ) def __getitem__( self : int , a__ : Any ): return self.get_value(a__ ) def __snake_case ( self : List[Any] , a__ : Optional[Any] ): return (idx - 1) // 2 def __snake_case ( self : str , a__ : Optional[int] ): return idx * 2 + 1 def __snake_case ( self : Optional[int] , a__ : Dict ): return idx * 2 + 2 def __snake_case ( self : int , a__ : Dict ): return self.heap_dict[key] def __snake_case ( self : List[Any] , a__ : Optional[Any] ): UpperCAmelCase = len(a__ ) - 1 UpperCAmelCase = self.get_parent_idx(a__ ) for idx, i in enumerate(a__ ): UpperCAmelCase = idx UpperCAmelCase = i.val for i in range(a__ , -1 , -1 ): self.sift_down(a__ , a__ ) return array def __snake_case ( self : List[Any] , a__ : Optional[Any] , a__ : Optional[Any] ): while True: UpperCAmelCase = self.get_left_child_idx(a__ ) # noqa: E741 UpperCAmelCase = self.get_right_child_idx(a__ ) UpperCAmelCase = idx if l < len(a__ ) and array[l] < array[idx]: UpperCAmelCase = l if r < len(a__ ) and array[r] < array[smallest]: UpperCAmelCase = r if smallest != idx: UpperCAmelCase, UpperCAmelCase = array[smallest], array[idx] ( ( UpperCAmelCase ), ( UpperCAmelCase ), ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) UpperCAmelCase = smallest else: break def __snake_case ( self : Union[str, Any] , a__ : Optional[int] ): UpperCAmelCase = self.get_parent_idx(a__ ) while p >= 0 and self.heap[p] > self.heap[idx]: UpperCAmelCase, UpperCAmelCase = self.heap[idx], self.heap[p] UpperCAmelCase, UpperCAmelCase = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) UpperCAmelCase = p UpperCAmelCase = self.get_parent_idx(a__ ) def __snake_case ( self : Optional[Any] ): return self.heap[0] def __snake_case ( self : List[str] ): UpperCAmelCase, UpperCAmelCase = self.heap[-1], self.heap[0] UpperCAmelCase, UpperCAmelCase = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) UpperCAmelCase = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def __snake_case ( self : Optional[Any] , a__ : Optional[Any] ): self.heap.append(a__ ) UpperCAmelCase = len(self.heap ) - 1 UpperCAmelCase = node.val self.sift_up(len(self.heap ) - 1 ) def __snake_case ( self : Tuple ): return len(self.heap ) == 0 def __snake_case ( self : Any , a__ : List[Any] , a__ : int ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" UpperCAmelCase = new_value UpperCAmelCase = new_value self.sift_up(self.idx_of_element[node] ) a__ : List[Any] = Node('R', -1) a__ : List[Any] = Node('B', 6) a__ : Optional[Any] = Node('A', 3) a__ : Any = Node('X', 1) a__ : Any = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array a__ : Dict = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in 
my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> -17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
51
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : Any = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys a__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
51
1
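The min-heap cell above pairs an index map with a sift-up pass to support `decrease_key`. The array-level core of those two operations, sketched without the index bookkeeping (names are illustrative):

```python
def sift_up(heap: list[int], idx: int) -> None:
    """Bubble heap[idx] toward the root until the min-heap property holds."""
    parent = (idx - 1) // 2
    while idx > 0 and heap[idx] < heap[parent]:
        heap[idx], heap[parent] = heap[parent], heap[idx]
        idx, parent = parent, (parent - 1) // 2

def decrease_key(heap: list[int], idx: int, new_value: int) -> None:
    """Lower one entry, then restore heap order with a single sift-up."""
    assert new_value < heap[idx], "newValue must be less than current value"
    heap[idx] = new_value
    sift_up(heap, idx)

heap = [1, 3, 6, 4]          # already a valid min-heap
decrease_key(heap, 2, 0)     # drop the 6 to 0; it bubbles to the root
assert heap == [0, 3, 1, 4]
```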
'''simple docstring''' from __future__ import annotations a__ : Union[str, Any] = 'Muhammad Umer Farooq' a__ : Dict = 'MIT' a__ : Optional[Any] = '1.0.0' a__ : Tuple = 'Muhammad Umer Farooq' a__ : Any = '[email protected]' a__ : List[Any] = 'Alpha' import re from html.parser import HTMLParser from urllib import parse import requests class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , a__ : str ): super().__init__() UpperCAmelCase = [] UpperCAmelCase = domain def __snake_case ( self : int , a__ : str , a__ : list[tuple[str, str | None]] ): # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: UpperCAmelCase = parse.urljoin(self.domain , a__ ) self.urls.append(a__ ) def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return ".".join(get_sub_domain_name(SCREAMING_SNAKE_CASE_ ).split('''.''' )[-2:] ) def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return parse.urlparse(SCREAMING_SNAKE_CASE_ ).netloc def __snake_case ( SCREAMING_SNAKE_CASE_ : str = "https://github.com" ) -> list[str]: """simple docstring""" UpperCAmelCase = get_domain_name(SCREAMING_SNAKE_CASE_ ) # Initialize the parser UpperCAmelCase = Parser(SCREAMING_SNAKE_CASE_ ) try: # Open URL UpperCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through UpperCAmelCase = set() for link in parser.urls: # open URL. # read = requests.get(link) try: UpperCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ ) # Get the valid email. UpperCAmelCase = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(SCREAMING_SNAKE_CASE_ ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": a__ : str = emails_from_url('https://github.com') print(F"""{len(emails)} emails found:""") print('\n'.join(sorted(emails)))
51
'''simple docstring''' from math import factorial def __snake_case ( SCREAMING_SNAKE_CASE_ : int = 100 ) -> int: """simple docstring""" return sum(int(SCREAMING_SNAKE_CASE_ ) for x in str(factorial(SCREAMING_SNAKE_CASE_ ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
51
1
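The second cell in the row above is Project Euler 20: the sum of the decimal digits of n!. A quick sanity check against the two standard values:

```python
from math import factorial

def factorial_digit_sum(n: int) -> int:
    """Sum of the decimal digits of n!."""
    return sum(int(digit) for digit in str(factorial(n)))

assert factorial_digit_sum(10) == 27    # 10! = 3628800 -> 3+6+2+8+8+0+0
assert factorial_digit_sum(100) == 648  # the Project Euler 20 answer
```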
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["image_processor", "tokenizer"] _lowerCamelCase ="CLIPImageProcessor" _lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self : Tuple , a__ : List[Any]=None , a__ : str=None , **a__ : Tuple ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__( self : Optional[Any] , a__ : Optional[int]=None , a__ : List[str]=None , a__ : int=None , **a__ : Tuple ): if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ): return self.tokenizer.batch_decode(*a__ , **a__ ) def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ): return self.tokenizer.decode(*a__ , **a__ ) @property def __snake_case ( self : str ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __snake_case ( self : Optional[int] ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , ) return self.image_processor_class @property def __snake_case ( self : List[Any] ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , ) return self.image_processor
51
'''simple docstring''' import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =StableUnCLIPPipeline _lowerCamelCase =TEXT_TO_IMAGE_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _lowerCamelCase =False def __snake_case ( self : str ): UpperCAmelCase = 32 UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , ) torch.manual_seed(0 ) UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ ) UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , ) torch.manual_seed(0 ) UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , ) torch.manual_seed(0 ) UpperCAmelCase = AutoencoderKL() UpperCAmelCase = { # prior components '''prior_tokenizer''': prior_tokenizer, 
'''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ): if str(a__ ).startswith('''mps''' ): UpperCAmelCase = torch.manual_seed(a__ ) else: UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ ) UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __snake_case ( self : List[Any] ): UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=a__ ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Optional[int] ): UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCAmelCase = pipe('''anime turle''' , generator=a__ , output_type='''np''' ) UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(a__ , a__ ) def __snake_case ( self : str ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) UpperCAmelCase = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
51
1
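In the CLIP-style processor cell above, the renaming pass collapsed every assignment target to the same token, which obscures `__call__`; in the upstream processor the text-plus-images branch attaches `pixel_values` to the text encoding before returning it. A plain sketch of that dispatch, with hypothetical callables standing in for the tokenizer and image processor (this is an illustration of the control flow, not the transformers implementation):

```python
def process(text=None, images=None, *, tokenize, extract):
    """Dispatch on which inputs are present, mirroring the cell's __call__."""
    if text is None and images is None:
        raise ValueError("You have to specify either text or images.")
    encoding = tokenize(text) if text is not None else None
    if images is not None:
        features = extract(images)
        if encoding is not None:
            # merge image features into the text encoding (assumed behavior)
            encoding["pixel_values"] = features["pixel_values"]
            return encoding
        return features
    return encoding

enc = process(
    "a photo of a cat",
    images=[[0.0]],
    tokenize=lambda t: {"input_ids": [101, 102]},   # toy stand-in
    extract=lambda im: {"pixel_values": im},        # toy stand-in
)
assert enc == {"input_ids": [101, 102], "pixel_values": [[0.0]]}
```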
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer a__ : int = logging.get_logger(__name__) a__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a__ : int = { 'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'}, 'tokenizer_file': { 'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json' }, } a__ : List[str] = {'mobilebert-uncased': 512} a__ : Union[str, Any] = {} class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =VOCAB_FILES_NAMES _lowerCamelCase =PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase =PRETRAINED_INIT_CONFIGURATION _lowerCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase =MobileBertTokenizer def __init__( self : Dict , a__ : Optional[int]=None , a__ : Tuple=None , a__ : Optional[Any]=True , a__ : Any="[UNK]" , a__ : int="[SEP]" , a__ : Optional[int]="[PAD]" , a__ : int="[CLS]" , a__ : List[str]="[MASK]" , a__ : Tuple=True , a__ : List[str]=None , **a__ : Any , ): super().__init__( a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , ) UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , a__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , a__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , a__ ) != tokenize_chinese_chars ): UpperCAmelCase = getattr(a__ , normalizer_state.pop('''type''' ) ) UpperCAmelCase = do_lower_case UpperCAmelCase = strip_accents UpperCAmelCase = tokenize_chinese_chars UpperCAmelCase = normalizer_class(**a__ ) UpperCAmelCase = do_lower_case def __snake_case ( self : int , a__ : List[str] , a__ : Optional[int]=None ): UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __snake_case ( self : List[Any] , a__ : List[int] , a__ : Optional[List[int]] = None ): UpperCAmelCase = [self.sep_token_id] UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __snake_case ( self : Tuple , a__ : str , a__ : Optional[str] = None ): UpperCAmelCase = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ )
51
'''simple docstring''' def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Dict: """simple docstring""" if not head: return True # split the list to two parts UpperCAmelCase, UpperCAmelCase = head.next, head while fast and fast.next: UpperCAmelCase = fast.next.next UpperCAmelCase = slow.next UpperCAmelCase = slow.next UpperCAmelCase = None # Don't forget here! But forget still works! # reverse the second part UpperCAmelCase = None while second: UpperCAmelCase = second.next UpperCAmelCase = node UpperCAmelCase = second UpperCAmelCase = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False UpperCAmelCase = node.next UpperCAmelCase = head.next return True def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]: """simple docstring""" if not head or not head.next: return True # 1. Get the midpoint (slow) UpperCAmelCase = UpperCAmelCase = UpperCAmelCase = head while fast and fast.next: UpperCAmelCase, UpperCAmelCase = fast.next.next, slow.next # 2. Push the second half into the stack UpperCAmelCase = [slow.val] while slow.next: UpperCAmelCase = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False UpperCAmelCase = cur.next return True def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: """simple docstring""" if not head or not head.next: return True UpperCAmelCase = {} UpperCAmelCase = 0 while head: if head.val in d: d[head.val].append(SCREAMING_SNAKE_CASE_ ) else: UpperCAmelCase = [pos] UpperCAmelCase = head.next pos += 1 UpperCAmelCase = pos - 1 UpperCAmelCase = 0 for v in d.values(): if len(SCREAMING_SNAKE_CASE_ ) % 2 != 0: middle += 1 else: UpperCAmelCase = 0 for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ): if v[i] + v[len(SCREAMING_SNAKE_CASE_ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
51
1
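The linked-list cell above checks for a palindrome three ways: reverse the second half in place, stack the second half, or record positions in a dict. For comparison, the simplest O(n)-space baseline, with a hypothetical `ListNode`:

```python
class ListNode:
    def __init__(self, val: int, nxt: "ListNode | None" = None) -> None:
        self.val = val
        self.next = nxt

def is_palindrome(head: "ListNode | None") -> bool:
    """Collect the values, then compare the list with its reverse."""
    vals: list[int] = []
    while head:
        vals.append(head.val)
        head = head.next
    return vals == vals[::-1]

assert is_palindrome(ListNode(1, ListNode(2, ListNode(1))))
assert not is_palindrome(ListNode(1, ListNode(2)))
```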
'''simple docstring''' from collections import defaultdict class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , a__ : Optional[int] , a__ : Union[str, Any] ): UpperCAmelCase = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 UpperCAmelCase = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(a__ ) ) ] UpperCAmelCase = defaultdict(a__ ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 UpperCAmelCase = (1 << len(a__ )) - 1 def __snake_case ( self : List[Any] , a__ : Union[str, Any] , a__ : List[str] ): # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement UpperCAmelCase = self.count_ways_until(a__ , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. UpperCAmelCase = total_ways_util return self.dp[mask][task_no] def __snake_case ( self : int , a__ : Optional[Any] ): # Store the list of persons for each task for i in range(len(a__ ) ): for j in task_performed[i]: self.task[j].append(a__ ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": a__ : List[Any] = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. a__ : List[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
51
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["image_processor", "tokenizer"] _lowerCamelCase ="CLIPImageProcessor" _lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self : Tuple , a__ : List[Any]=None , a__ : str=None , **a__ : Tuple ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__( self : Optional[Any] , a__ : Optional[int]=None , a__ : List[str]=None , a__ : int=None , **a__ : Tuple ): if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ): return self.tokenizer.batch_decode(*a__ , **a__ ) def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ): return self.tokenizer.decode(*a__ , **a__ ) @property def __snake_case ( self : str ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __snake_case ( self : Optional[int] ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , ) return self.image_processor_class @property def __snake_case ( self : List[Any] ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , ) return self.image_processor
51
1
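The assignment-counting cell in the row above memoizes over (bitmask of assigned persons, current task number). The same count via `functools.lru_cache` instead of a hand-rolled DP table; the sample data and expected answer match the cell's `__main__` block, all other names are illustrative:

```python
from functools import lru_cache

def count_assignments(task_performed: list[list[int]], total_tasks: int) -> int:
    """Ways to give every person a distinct task they are able to perform."""
    n_people = len(task_performed)
    final_mask = (1 << n_people) - 1
    people_for_task: dict[int, list[int]] = {}
    for person, tasks in enumerate(task_performed):
        for task in tasks:
            people_for_task.setdefault(task, []).append(person)

    @lru_cache(maxsize=None)
    def ways(mask: int, task_no: int) -> int:
        if mask == final_mask:           # everyone already has a task
            return 1
        if task_no > total_tasks:        # tasks exhausted, someone left over
            return 0
        total = ways(mask, task_no + 1)  # leave this task unassigned
        for person in people_for_task.get(task_no, []):
            if not mask & (1 << person):
                total += ways(mask | (1 << person), task_no + 1)
        return total

    return ways(0, 1)

assert count_assignments([[1, 3, 4], [1, 2, 5], [3, 4]], 5) == 10
```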
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , a__ : int , a__ : int , a__ : int , a__ : str=0.0 , a__ : Optional[int] = None , a__ : str = "geglu" , a__ : Optional[int] = None , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = True , a__ : str = "layer_norm" , a__ : bool = False , ): super().__init__() UpperCAmelCase = only_cross_attention UpperCAmelCase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' UpperCAmelCase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: UpperCAmelCase = AdaLayerNorm(a__ , a__ ) elif self.use_ada_layer_norm_zero: UpperCAmelCase = AdaLayerNormZero(a__ , a__ ) else: UpperCAmelCase = nn.LayerNorm(a__ , elementwise_affine=a__ ) UpperCAmelCase = Attention( query_dim=a__ , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=a__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. UpperCAmelCase = ( AdaLayerNorm(a__ , a__ ) if self.use_ada_layer_norm else nn.LayerNorm(a__ , elementwise_affine=a__ ) ) UpperCAmelCase = Attention( query_dim=a__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , upcast_attention=a__ , ) # is self-attn if encoder_hidden_states is none else: UpperCAmelCase = None UpperCAmelCase = None # 3. Feed-forward UpperCAmelCase = nn.LayerNorm(a__ , elementwise_affine=a__ ) UpperCAmelCase = FeedForward(a__ , dropout=a__ , activation_fn=a__ , final_dropout=a__ ) # let chunk size default to None UpperCAmelCase = None UpperCAmelCase = 0 def __snake_case ( self : Tuple , a__ : Optional[int] , a__ : int ): # Sets chunk feed-forward UpperCAmelCase = chunk_size UpperCAmelCase = dim def __snake_case ( self : Optional[Any] , a__ : torch.FloatTensor , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.LongTensor] = None , a__ : Dict[str, Any] = None , a__ : Optional[torch.LongTensor] = None , ): # Notice that normalization is always applied before the real computation in the following blocks. # 1. 
Self-Attention if self.use_ada_layer_norm: UpperCAmelCase = self.norma(a__ , a__ ) elif self.use_ada_layer_norm_zero: UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = self.norma( a__ , a__ , a__ , hidden_dtype=hidden_states.dtype ) else: UpperCAmelCase = self.norma(a__ ) UpperCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {} UpperCAmelCase = self.attna( a__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=a__ , **a__ , ) if self.use_ada_layer_norm_zero: UpperCAmelCase = gate_msa.unsqueeze(1 ) * attn_output UpperCAmelCase = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: UpperCAmelCase = ( self.norma(a__ , a__ ) if self.use_ada_layer_norm else self.norma(a__ ) ) UpperCAmelCase = self.attna( a__ , encoder_hidden_states=a__ , attention_mask=a__ , **a__ , ) UpperCAmelCase = attn_output + hidden_states # 3. Feed-forward UpperCAmelCase = self.norma(a__ ) if self.use_ada_layer_norm_zero: UpperCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." ) UpperCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size UpperCAmelCase = torch.cat( [self.ff(a__ ) for hid_slice in norm_hidden_states.chunk(a__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: UpperCAmelCase = self.ff(a__ ) if self.use_ada_layer_norm_zero: UpperCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output UpperCAmelCase = ff_output + hidden_states return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , a__ : int , a__ : Optional[int] = None , a__ : int = 4 , a__ : float = 0.0 , a__ : str = "geglu" , a__ : bool = False , ): super().__init__() UpperCAmelCase = int(dim * mult ) UpperCAmelCase = dim_out if dim_out is not None else dim if activation_fn == "gelu": UpperCAmelCase = GELU(a__ , a__ ) if activation_fn == "gelu-approximate": UpperCAmelCase = GELU(a__ , a__ , approximate='''tanh''' ) elif activation_fn == "geglu": UpperCAmelCase = GEGLU(a__ , a__ ) elif activation_fn == "geglu-approximate": UpperCAmelCase = ApproximateGELU(a__ , a__ ) UpperCAmelCase = nn.ModuleList([] ) # project in self.net.append(a__ ) # project dropout self.net.append(nn.Dropout(a__ ) ) # project out self.net.append(nn.Linear(a__ , a__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(a__ ) ) def __snake_case ( self : Any , a__ : List[Any] ): for module in self.net: UpperCAmelCase = module(a__ ) return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : int , a__ : int , a__ : int , a__ : str = "none" ): super().__init__() UpperCAmelCase = nn.Linear(a__ , a__ ) UpperCAmelCase = approximate def __snake_case ( self : List[str] , a__ : Optional[int] ): if gate.device.type != "mps": return F.gelu(a__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def __snake_case ( self : Union[str, Any] , a__ : List[Any] ): UpperCAmelCase = self.proj(a__ ) UpperCAmelCase = self.gelu(a__ ) return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , a__ : int , a__ : int ): super().__init__() UpperCAmelCase = nn.Linear(a__ , dim_out * 2 ) def __snake_case ( self : List[Any] , a__ : List[str] ): if gate.device.type != "mps": return F.gelu(a__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __snake_case ( self : Dict , a__ : Optional[Any] ): UpperCAmelCase, UpperCAmelCase = self.proj(a__ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(a__ ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] , a__ : int , a__ : int ): super().__init__() UpperCAmelCase = nn.Linear(a__ , a__ ) def __snake_case ( self : Optional[int] , a__ : Union[str, Any] ): UpperCAmelCase = self.proj(a__ ) return x * torch.sigmoid(1.702 * x ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , a__ : Tuple , a__ : str ): super().__init__() UpperCAmelCase = nn.Embedding(a__ , a__ ) UpperCAmelCase = nn.SiLU() UpperCAmelCase = nn.Linear(a__ , embedding_dim * 2 ) UpperCAmelCase = nn.LayerNorm(a__ , elementwise_affine=a__ ) def __snake_case ( self : int , a__ : Union[str, Any] , a__ : Optional[int] ): UpperCAmelCase = self.linear(self.silu(self.emb(a__ ) ) ) UpperCAmelCase, UpperCAmelCase = torch.chunk(a__ , 2 ) UpperCAmelCase = self.norm(a__ ) * (1 + scale) + shift return x class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , a__ : List[str] , a__ : List[str] ): super().__init__() UpperCAmelCase = CombinedTimestepLabelEmbeddings(a__ , a__ ) UpperCAmelCase = nn.SiLU() UpperCAmelCase = nn.Linear(a__ , 6 * embedding_dim , bias=a__ ) UpperCAmelCase = nn.LayerNorm(a__ , elementwise_affine=a__ , eps=1e-6 ) def __snake_case ( self : List[str] , a__ : str , a__ : List[str] , a__ : List[str] , a__ : int=None ): UpperCAmelCase = self.linear(self.silu(self.emb(a__ , a__ , hidden_dtype=a__ ) ) ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = emb.chunk(6 , dim=1 ) UpperCAmelCase = self.norm(a__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] , a__ : int , a__ : int , a__ : int , a__ : Optional[str] = None , a__ : float = 1e-5 ): super().__init__() UpperCAmelCase = num_groups UpperCAmelCase = eps if act_fn is None: UpperCAmelCase = None else: UpperCAmelCase = get_activation(a__ ) UpperCAmelCase = nn.Linear(a__ , out_dim * 2 ) def __snake_case ( self : List[Any] , a__ : 
Optional[int] , a__ : int ): if self.act: UpperCAmelCase = self.act(a__ ) UpperCAmelCase = self.linear(a__ ) UpperCAmelCase = emb[:, :, None, None] UpperCAmelCase, UpperCAmelCase = emb.chunk(2 , dim=1 ) UpperCAmelCase = F.group_norm(a__ , self.num_groups , eps=self.eps ) UpperCAmelCase = x * (1 + scale) + shift return x
51
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ : str = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =XGLMTokenizer _lowerCamelCase =XGLMTokenizerFast _lowerCamelCase =True _lowerCamelCase =True def __snake_case ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self : List[Any] ): UpperCAmelCase = '''<pad>''' UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(len(a__ ) , 1008 ) def __snake_case ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def __snake_case ( self : Optional[Any] ): UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ ) UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual( a__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def __snake_case ( self : Optional[Any] ): return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) def __snake_case ( self : Optional[int] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(a__ , f.name ) UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ ) UpperCAmelCase = pickle.dumps(a__ ) pickle.loads(a__ ) def __snake_case ( self : Tuple ): if not self.test_rust_tokenizer: return UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = '''I was born in 92000, and this is 
falsé.''' UpperCAmelCase = tokenizer.tokenize(a__ ) UpperCAmelCase = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ ) UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = tokenizer.encode(a__ ) UpperCAmelCase = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) @slow def __snake_case ( self : int ): UpperCAmelCase = '''Hello World!''' UpperCAmelCase = [2, 31227, 4447, 35] self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @slow def __snake_case ( self : List[str] ): UpperCAmelCase = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth''' ) # fmt: off UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: on self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) ) @slow def __snake_case ( self : Any ): # fmt: off UpperCAmelCase = { '''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
51
1
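Among the feed-forward gates defined in the attention cell above, GEGLU projects to twice the output width and gates one half with the GELU of the other. A standalone sketch of that unit:

```python
import torch
import torch.nn.functional as F
from torch import nn

class GEGLU(nn.Module):
    """Gated-GELU unit: Linear to 2*dim_out, then hidden * GELU(gate)."""

    def __init__(self, dim_in: int, dim_out: int) -> None:
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, gate = self.proj(x).chunk(2, dim=-1)
        return hidden * F.gelu(gate)

x = torch.randn(4, 16)
assert GEGLU(16, 32)(x).shape == (4, 32)
```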
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": a__ : int = input('Enter image url: ').strip() print(F"""Downloading image from {url} ...""") a__ : Dict = BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image a__ : List[str] = soup.find('meta', {'property': 'og:image'})['content'] a__ : Optional[int] = requests.get(image_url).content a__ : Tuple = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(F"""Done. Image saved to disk as {file_name}.""")
51
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a__ : str = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> YolosConfig: """simple docstring""" UpperCAmelCase = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase = 192 UpperCAmelCase = 768 UpperCAmelCase = 12 UpperCAmelCase = 3 UpperCAmelCase = [800, 1_333] UpperCAmelCase = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase = 330 UpperCAmelCase = 14 UpperCAmelCase = 6 UpperCAmelCase = 1_320 elif "yolos_s" in yolos_name: UpperCAmelCase = 384 UpperCAmelCase = 1_536 UpperCAmelCase = 12 UpperCAmelCase = 6 elif "yolos_b" in yolos_name: UpperCAmelCase = [800, 1_344] UpperCAmelCase = 91 UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''coco-detection-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosConfig , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase = in_proj_weight[: config.hidden_size, :] UpperCAmelCase = in_proj_bias[: config.hidden_size] UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" if "backbone" in name: UpperCAmelCase = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" 
in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if "vit.norm" in name: UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosForObjectDetection ) -> dict: """simple docstring""" for key in orig_state_dict.copy().keys(): UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_split[2] ) UpperCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[ dim : dim * 2, : ] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = val return orig_state_dict def __snake_case ( ) -> torch.Tensor: """simple docstring""" UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" UpperCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE_ ) # load original state_dict UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model'''] # load 🤗 model UpperCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512 UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase, UpperCAmelCase = outputs.logits, outputs.pred_boxes UpperCAmelCase, UpperCAmelCase = None, None if yolos_name == "yolos_ti": UpperCAmelCase = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) UpperCAmelCase = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) UpperCAmelCase = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) UpperCAmelCase = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) UpperCAmelCase = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": UpperCAmelCase = torch.tensor( [[-40.6064, 
-24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) UpperCAmelCase = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: UpperCAmelCase = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) UpperCAmelCase = model_mapping[yolos_name] image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) a__ : Optional[Any] = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
51
1
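The YOLOS conversion cell above splits timm's fused `qkv` projection into query/key/value by slicing thirds off the first axis. The slicing in isolation, on a toy tensor (shapes are made up for illustration):

```python
import torch

def split_qkv(in_proj_weight: torch.Tensor, hidden_size: int):
    """Slice a fused (3*hidden, in) qkv weight into q, k, v pieces."""
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    return query, key, value

fused = torch.arange(18.0).reshape(6, 3)   # toy fused weight, hidden_size = 2
q, k, v = split_qkv(fused, 2)
assert q.shape == k.shape == v.shape == (2, 3)
assert torch.equal(v, fused[4:])           # value is the last third
```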
'''simple docstring''' def __snake_case ( SCREAMING_SNAKE_CASE_ : int = 1_000_000 ) -> int: """simple docstring""" UpperCAmelCase = set(range(3 , SCREAMING_SNAKE_CASE_ , 2 ) ) primes.add(2 ) for p in range(3 , SCREAMING_SNAKE_CASE_ , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) UpperCAmelCase = [float(SCREAMING_SNAKE_CASE_ ) for n in range(limit + 1 )] for p in primes: for n in range(SCREAMING_SNAKE_CASE_ , limit + 1 , SCREAMING_SNAKE_CASE_ ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F"""{solution() = }""")
51
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType a__ : List[Any] = logging.get_logger(__name__) a__ : int = { 'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json', } # fmt: off a__ : Any = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377, 1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211, 4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786, 11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791, 17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409, 34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361 ] a__ : Tuple = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627, 3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647, 7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793, 14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675, 22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865, 42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362 ] class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase ="whisper" _lowerCamelCase =["past_key_values"] _lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : List[Any] , a__ : Any=51865 , a__ : Any=80 , a__ : Dict=6 , a__ : int=4 , a__ : int=6 , a__ : str=4 , a__ : int=1536 , a__ : Optional[Any]=1536 , a__ : str=0.0 , a__ : Optional[int]=0.0 , a__ : Optional[int]=50257 , a__ : int=True , a__ : Optional[int]=True , a__ : str="gelu" , a__ : List[str]=256 , a__ : Union[str, Any]=0.0 , a__ : List[str]=0.0 , a__ : Any=0.0 , a__ : str=0.02 , a__ : str=False , a__ : int=1500 , a__ : Tuple=448 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Tuple=None , a__ : List[Any]=[220, 50256] , a__ : Optional[int]=False , a__ : Optional[Any]=256 , a__ : Any=False , a__ : int=0.05 , a__ : Optional[Any]=10 , a__ : Dict=2 , a__ : Optional[Any]=0.0 , a__ : Tuple=10 , a__ : Any=0 , a__ : str=7 , **a__ : Any , ): UpperCAmelCase = vocab_size UpperCAmelCase = num_mel_bins UpperCAmelCase = d_model UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = max_source_positions UpperCAmelCase = max_target_positions # Audio Classification-specific parameters. 
Feel free to ignore for other classes. UpperCAmelCase = classifier_proj_size UpperCAmelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase = apply_spec_augment UpperCAmelCase = mask_time_prob UpperCAmelCase = mask_time_length UpperCAmelCase = mask_time_min_masks UpperCAmelCase = mask_feature_prob UpperCAmelCase = mask_feature_length UpperCAmelCase = mask_feature_min_masks UpperCAmelCase = median_filter_width super().__init__( pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , suppress_tokens=a__ , begin_suppress_tokens=a__ , **a__ , ) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' @property def __snake_case ( self : List[str] ): UpperCAmelCase = OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase = {0: '''batch'''} else: UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(a__ , direction='''inputs''' ) return common_inputs def __snake_case ( self : str , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 22050 , a__ : float = 5.0 , a__ : int = 220 , ): UpperCAmelCase = OrderedDict() UpperCAmelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=a__ , framework=a__ , sampling_rate=a__ , time_duration=a__ , frequency=a__ , ) UpperCAmelCase = encoder_inputs['''input_features'''].shape[2] UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length UpperCAmelCase = super().generate_dummy_inputs( preprocessor.tokenizer , a__ , a__ , a__ , a__ ) UpperCAmelCase = encoder_inputs.pop('''input_features''' ) UpperCAmelCase = decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: UpperCAmelCase = decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def __snake_case ( self : Dict ): return 1e-3
51
1
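The two classes above are a flattened copy of Hugging Face's WhisperConfig and its ONNX export config; the two long integer lists are the default token-suppression tables. As orientation, a minimal sketch of how the upstream config is normally used. This assumes the real `transformers.WhisperConfig` API rather than the mangled names in the row itself:

from transformers import WhisperConfig

# Defaults mirror the signature above: 51865 vocab entries, 80 mel bins,
# d_model=256, and 1500 encoder positions (max_source_positions).
config = WhisperConfig()

# The attribute_map routes generic names onto Whisper-specific ones:
# "hidden_size" -> "d_model", "num_attention_heads" -> "encoder_attention_heads".
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 4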
"""Tarjan's algorithm (O(V + E)) for finding the strongly connected
components of a directed graph."""
from collections import deque


def tarjan(g):
    """Return the strongly connected components of the adjacency list ``g``."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = min(lowlink_of[w], lowlink_of[v])
            elif on_stack[w]:
                lowlink_of[v] = min(lowlink_of[w], lowlink_of[v])

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    """Build an adjacency list with ``n`` vertices from ``(u, v)`` edge pairs."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
51
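A quick usage sketch for the SCC routine above. Components come out in reverse topological order of the condensation graph, which is why the four-node cycle appears last in the built-in test:

# Two 2-cycles plus an isolated vertex.
g = create_graph(5, [(0, 1), (1, 0), (2, 3), (3, 2)])
print(tarjan(g))  # [[1, 0], [3, 2], [4]]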
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =LEDConfig _lowerCamelCase ={} _lowerCamelCase ="gelu" def __init__( self : Tuple , a__ : Any , a__ : int=13 , a__ : List[Any]=7 , a__ : int=True , a__ : Union[str, Any]=False , a__ : Tuple=99 , a__ : Any=32 , a__ : List[Any]=2 , a__ : Any=4 , a__ : List[Any]=37 , a__ : List[Any]=0.1 , a__ : Any=0.1 , a__ : Optional[int]=20 , a__ : List[Any]=2 , a__ : Union[str, Any]=1 , a__ : List[Any]=0 , a__ : Union[str, Any]=4 , ): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = eos_token_id UpperCAmelCase = pad_token_id UpperCAmelCase = bos_token_id UpperCAmelCase = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after UpperCAmelCase = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests UpperCAmelCase = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __snake_case ( self : Optional[int] ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ ) UpperCAmelCase = tf.concat( [tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , ) UpperCAmelCase = global_attention_mask return config, inputs_dict def __snake_case ( self : Optional[int] , a__ : List[str] , a__ : int ): UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder() UpperCAmelCase = inputs_dict['''input_ids'''] UpperCAmelCase = input_ids[:1, :] UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase = 1 # first forward pass UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ ) UpperCAmelCase, UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase = model(a__ , attention_mask=a__ )[0] UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(a__ , a__ , rtol=1e-3 ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=None , ) -> Dict: """simple docstring""" if attention_mask is None: UpperCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase = tf.ones((config.encoder_layers, 
config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowerCamelCase =(TFLEDForConditionalGeneration,) if is_tf_available() else () _lowerCamelCase =( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowerCamelCase =True _lowerCamelCase =False _lowerCamelCase =False _lowerCamelCase =False def __snake_case ( self : Optional[Any] ): UpperCAmelCase = TFLEDModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=a__ ) def __snake_case ( self : int ): self.config_tester.run_common_tests() def __snake_case ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*a__ ) def __snake_case ( self : Optional[int] ): UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] ) UpperCAmelCase = 2 UpperCAmelCase = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) UpperCAmelCase = True UpperCAmelCase = self.model_tester.seq_length UpperCAmelCase = self.model_tester.encoder_seq_length def check_decoder_attentions_output(a__ : Tuple ): UpperCAmelCase = outputs.decoder_attentions self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(a__ : int ): UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions] UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) UpperCAmelCase = len(a__ ) self.assertEqual(config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) if self.is_encoder_decoder: UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) self.assertEqual(config.output_hidden_states , a__ ) check_decoder_attentions_output(a__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase = True UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , 
a__ ) ) self.assertEqual(config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) # Check attention is always last and order is fine UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = model_class(a__ ) UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) ) self.assertEqual(model.config.output_hidden_states , a__ ) check_encoder_attentions_output(a__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def __snake_case ( self : Any ): pass def __snake_case ( self : Union[str, Any] ): # TODO: Head-masking not yet implement pass def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple: """simple docstring""" return tf.constant(SCREAMING_SNAKE_CASE_ , dtype=tf.intaa ) a__ : int = 1e-4 @slow @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : str ): UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ ) UpperCAmelCase = model(**a__ )[0] UpperCAmelCase = (1, 1024, 768) self.assertEqual(output.shape , a__ ) # change to expected output here UpperCAmelCase = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 ) def __snake_case ( self : str ): UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ ) UpperCAmelCase = model(**a__ )[0] UpperCAmelCase = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , a__ ) # change to expected output here UpperCAmelCase = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
51
1
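The `prepare_led_inputs_dict` helper in the test file above derives every attention mask from the pad token. The core trick in isolation, as a minimal sketch assuming TensorFlow, with illustrative values:

import tensorflow as tf

pad_token_id = 1
input_ids = tf.constant([[5, 7, 9, pad_token_id, pad_token_id]])

# 1 where the token is real, 0 where it is padding.
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 0 0]]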
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : List[str] = logging.get_logger(__name__) a__ : Any = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase ="table-transformer" _lowerCamelCase =["past_key_values"] _lowerCamelCase ={ "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : str , a__ : Any=True , a__ : str=None , a__ : Union[str, Any]=3 , a__ : Optional[Any]=100 , a__ : Optional[int]=6 , a__ : List[Any]=2048 , a__ : Optional[Any]=8 , a__ : Dict=6 , a__ : Any=2048 , a__ : List[Any]=8 , a__ : Dict=0.0 , a__ : Dict=0.0 , a__ : List[Any]=True , a__ : int="relu" , a__ : str=256 , a__ : Dict=0.1 , a__ : str=0.0 , a__ : List[Any]=0.0 , a__ : Dict=0.02 , a__ : Union[str, Any]=1.0 , a__ : Tuple=False , a__ : Any="sine" , a__ : Optional[int]="resnet50" , a__ : Dict=True , a__ : Dict=False , a__ : str=1 , a__ : List[str]=5 , a__ : Union[str, Any]=2 , a__ : Optional[int]=1 , a__ : Dict=1 , a__ : str=5 , a__ : List[Any]=2 , a__ : Optional[Any]=0.1 , **a__ : Optional[int] , ): if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) UpperCAmelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(a__ , a__ ): UpperCAmelCase = backbone_config.get('''model_type''' ) UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase = config_class.from_dict(a__ ) # set timm attributes to None UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = None, None, None UpperCAmelCase = use_timm_backbone UpperCAmelCase = backbone_config UpperCAmelCase = num_channels UpperCAmelCase = num_queries UpperCAmelCase = d_model UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = init_xavier_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = encoder_layers UpperCAmelCase = auxiliary_loss UpperCAmelCase = position_embedding_type UpperCAmelCase = backbone UpperCAmelCase = use_pretrained_backbone UpperCAmelCase = dilation # Hungarian matcher UpperCAmelCase = class_cost UpperCAmelCase = bbox_cost UpperCAmelCase = giou_cost # Loss coefficients UpperCAmelCase = mask_loss_coefficient UpperCAmelCase = dice_loss_coefficient UpperCAmelCase = bbox_loss_coefficient UpperCAmelCase = giou_loss_coefficient UpperCAmelCase = eos_coefficient super().__init__(is_encoder_decoder=a__ , **a__ ) @property def __snake_case ( self : Dict ): return self.encoder_attention_heads @property def __snake_case ( self : Any ): return self.d_model class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =version.parse("1.11" ) @property def __snake_case ( self 
: Tuple ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def __snake_case ( self : int ): return 1e-5 @property def __snake_case ( self : Tuple ): return 12
51
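The config above is a flattened TableTransformerConfig. A minimal sketch of the equivalent upstream usage (this assumes the real `transformers.TableTransformerConfig` API and its visible defaults):

from transformers import TableTransformerConfig

config = TableTransformerConfig()  # timm ResNet-50 backbone by default

# The attribute_map and the two properties resolve generic names to
# the DETR-style ones: d_model=256, encoder_attention_heads=8.
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8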
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = ['XLNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[Any] = ['XLNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = [ 'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLNetForMultipleChoice', 'XLNetForQuestionAnswering', 'XLNetForQuestionAnsweringSimple', 'XLNetForSequenceClassification', 'XLNetForTokenClassification', 'XLNetLMHeadModel', 'XLNetModel', 'XLNetPreTrainedModel', 'load_tf_weights_in_xlnet', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ 'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLNetForMultipleChoice', 'TFXLNetForQuestionAnsweringSimple', 'TFXLNetForSequenceClassification', 'TFXLNetForTokenClassification', 'TFXLNetLMHeadModel', 'TFXLNetMainLayer', 'TFXLNetModel', 'TFXLNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
51
1
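The XLNet `__init__` above defers all heavy imports through `_LazyModule`, so importing the package only pays for the backends actually touched. A stripped-down sketch of the pattern (a hypothetical class, not the real `transformers.utils._LazyModule`; it assumes `name` is an importable package):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes from submodules on first access, not at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Reverse map: object name -> submodule that defines it.
        self._obj_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        module_name = self._obj_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(submodule, attr)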
'''simple docstring''' import collections import importlib.util import os import re from pathlib import Path a__ : Any = 'src/transformers' # Matches is_xxx_available() a__ : Any = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} a__ : List[str] = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] a__ : Optional[Any] = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available a__ : Dict = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") a__ : Any = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] a__ : List[Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", a__ : Optional[int] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], a__ : Tuple = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo a__ : Optional[Any] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: a__ : Dict = re.compile(R'^\s*try:') # Catches a line with else: a__ : Dict = re.compile(R'^\s*else:') def __snake_case ( SCREAMING_SNAKE_CASE_ : int ) -> str: """simple docstring""" if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None: return None UpperCAmelCase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE_ ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int: """simple docstring""" with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase = f.readlines() UpperCAmelCase = 0 while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE_ ): return None # First grab the objects without a specific backend in _import_structure UpperCAmelCase = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCAmelCase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ): UpperCAmelCase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0] UpperCAmelCase = re.findall('''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCAmelCase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: UpperCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCAmelCase = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
UpperCAmelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCAmelCase = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCAmelCase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCAmelCase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCAmelCase = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCAmelCase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 UpperCAmelCase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCAmelCase = [] while ( line_index < len(SCREAMING_SNAKE_CASE_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCAmelCase = lines[line_index] UpperCAmelCase = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCAmelCase = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE_ ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCAmelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCAmelCase = lines[line_index] UpperCAmelCase = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 UpperCAmelCase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: """simple docstring""" def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCAmelCase = [] for key in import_dict_objects.keys(): UpperCAmelCase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" ) UpperCAmelCase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCAmelCase = '''base imports''' if key == '''none''' else f"{key} backend" errors.append(f"Differences for {name}:" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f" {a} in TYPE_HINT but not in _import_structure." ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f" {a} in _import_structure but not in TYPE_HINT." 
) return errors def __snake_case ( ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ): if "__init__.py" in files: UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) UpperCAmelCase = parse_init(SCREAMING_SNAKE_CASE_ ) if objects is not None: UpperCAmelCase = analyze_results(*SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: UpperCAmelCase = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) > 0: raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def __snake_case ( ) -> int: """simple docstring""" UpperCAmelCase = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(SCREAMING_SNAKE_CASE_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCAmelCase = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase = short_path.replace(os.path.sep , '''.''' ) submodules.append(SCREAMING_SNAKE_CASE_ ) for fname in files: if fname == "__init__.py": continue UpperCAmelCase = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(SCREAMING_SNAKE_CASE_ ) return submodules a__ : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def __snake_case ( ) -> Any: """simple docstring""" UpperCAmelCase = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) UpperCAmelCase = spec.loader.load_module() UpperCAmelCase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(SCREAMING_SNAKE_CASE_ ) > 0: UpperCAmelCase = '''\n'''.join(f"- {module}" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f"{list_of_modules}\n" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
51
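Worth noting how the first function in the script above (upstream name `find_backend`; the flattened row collapses the names) normalizes backend guards: it matches stacked `is_xxx_available()` calls, sorts the backend names, and joins them with `_and_`. Illustrative input and output, assuming the regexes defined above:

line = "    if not is_torch_available() and not is_tf_available():"
# _re_test_backend matches the guard, _re_backend extracts ["torch", "tf"];
# the sorted, joined result keys the per-backend object dictionaries.
print(find_backend(line))        # tf_and_torch
print(find_backend("    else:"))  # None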
'''simple docstring''' import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json a__ : List[Any] = 'sshleifer/mar_enro_6_3_student' class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' def __snake_case ( self : Dict ): super().setUp() UpperCAmelCase = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=a__ , ) UpperCAmelCase = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k" @slow @require_torch_gpu def __snake_case ( self : Optional[int] ): MarianMTModel.from_pretrained(a__ ) @slow @require_torch_gpu def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script UpperCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase = bash_script.replace(a__ , str(a__ ) ) UpperCAmelCase = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") UpperCAmelCase = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split() # XXX: args.gpus > 1 : handle multi_gpu in the future UpperCAmelCase = ['''finetune.py'''] + bash_script.split() + args with patch.object(a__ , '''argv''' , a__ ): UpperCAmelCase = argparse.ArgumentParser() UpperCAmelCase = pl.Trainer.add_argparse_args(a__ ) UpperCAmelCase = SummarizationModule.add_model_specific_args(a__ , os.getcwd() ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = main(a__ ) # Check metrics UpperCAmelCase = load_json(model.metrics_save_path ) UpperCAmelCase = metrics['''val'''][0] UpperCAmelCase = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase = os.path.join(args.output_dir , a__ ) UpperCAmelCase = torch.load(a__ , map_location='''cpu''' ) UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase = {os.path.basename(a__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def __snake_case ( self : Any ): UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro" UpperCAmelCase = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 128, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script UpperCAmelCase = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase = bash_script.replace(a__ , str(a__ ) ) UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' ) UpperCAmelCase = 6 UpperCAmelCase = ( ['''distillation.py'''] + bash_script.split() + [ f"--output_dir={output_dir}", '''--gpus=1''', '''--learning_rate=1e-3''', f"--num_train_epochs={epochs}", '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(a__ , '''argv''' , a__ ): UpperCAmelCase = argparse.ArgumentParser() UpperCAmelCase = pl.Trainer.add_argparse_args(a__ ) UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() ) UpperCAmelCase = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu UpperCAmelCase = distill_main(a__ ) # Check metrics UpperCAmelCase = load_json(model.metrics_save_path ) UpperCAmelCase = metrics['''val'''][0] UpperCAmelCase = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase = os.path.join(args.output_dir , a__ ) UpperCAmelCase = torch.load(a__ , map_location='''cpu''' ) UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase = {os.path.basename(a__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
51
1
"""Image-classification task template: aligns a dataset's features with the
schema expected by image-classification models."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ rather than setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
51
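Usage sketch for the template above (assuming the `datasets` task API this file belongs to):

from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
task = task.align_with_features(features)

# The label schema now carries the dataset's actual class names.
print(task.label_schema["labels"].names)  # ['cat', 'dog']
print(task.column_mapping)                # {'image': 'image', 'labels': 'labels'}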
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["image_processor", "tokenizer"] _lowerCamelCase ="CLIPImageProcessor" _lowerCamelCase =("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__( self : Union[str, Any] , a__ : List[str]=None , a__ : Union[str, Any]=None , **a__ : Optional[Any] ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__( self : Any , a__ : Any=None , a__ : str=None , a__ : List[Any]=None , **a__ : List[str] ): if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def __snake_case ( self : Optional[Any] , *a__ : int , **a__ : List[Any] ): return self.tokenizer.batch_decode(*a__ , **a__ ) def __snake_case ( self : Any , *a__ : Union[str, Any] , **a__ : Any ): return self.tokenizer.decode(*a__ , **a__ ) @property def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
51
1
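The processor above pairs a CLIPImageProcessor with an XLM-RoBERTa tokenizer, the layout used by AltCLIP-style models. A hedged usage sketch; both the `AltCLIPProcessor` pairing and the checkpoint name are assumptions, not stated in the row itself:

from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")

# Tokenizer outputs plus the image processor's pixel_values, in one encoding.
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']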
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]: """simple docstring""" UpperCAmelCase = [] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for v in tree.values(): shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE_ ) ) elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE_ ) ) elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('''Not supported''' ) return shapes @torch.jit.ignore def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple[int, ...] ) -> Tuple[int, ...]: """simple docstring""" UpperCAmelCase = [] for d in reversed(SCREAMING_SNAKE_CASE_ ): idx.append(flat_idx % d ) UpperCAmelCase = flat_idx // d return tuple(reversed(SCREAMING_SNAKE_CASE_ ) ) @torch.jit.ignore def __snake_case ( SCREAMING_SNAKE_CASE_ : Sequence[int] , SCREAMING_SNAKE_CASE_ : Sequence[int] , SCREAMING_SNAKE_CASE_ : Sequence[int] , SCREAMING_SNAKE_CASE_ : Optional[Sequence[bool]] = None , SCREAMING_SNAKE_CASE_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]: """simple docstring""" def reduce_edge_list(SCREAMING_SNAKE_CASE_ : List[bool] ) -> None: UpperCAmelCase = True for i in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCAmelCase = -1 * (i + 1) l[reversed_idx] &= tally UpperCAmelCase = l[reversed_idx] if start_edges is None: UpperCAmelCase = [s == 0 for s in start] reduce_edge_list(SCREAMING_SNAKE_CASE_ ) if end_edges is None: UpperCAmelCase = [e == (d - 1) for e, d in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] reduce_edge_list(SCREAMING_SNAKE_CASE_ ) # Base cases. 
Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(SCREAMING_SNAKE_CASE_ ) == 0: return [()] elif len(SCREAMING_SNAKE_CASE_ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] UpperCAmelCase = [] UpperCAmelCase = [] # Dimensions common to start and end can be selected directly for s, e in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if s == e: path_list.append(slice(SCREAMING_SNAKE_CASE_ , s + 1 ) ) else: break UpperCAmelCase = tuple(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) # start == end, and we're done if divergence_idx == len(SCREAMING_SNAKE_CASE_ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase = start[divergence_idx] return tuple( path + (slice(SCREAMING_SNAKE_CASE_ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase = end[divergence_idx] return tuple( path + (slice(SCREAMING_SNAKE_CASE_ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. 
If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) UpperCAmelCase = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def __snake_case ( SCREAMING_SNAKE_CASE_ : torch.Tensor , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> torch.Tensor: """simple docstring""" UpperCAmelCase = t.shape[:no_batch_dims] UpperCAmelCase = list(_flat_idx_to_idx(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # _get_minimal_slice_set is inclusive UpperCAmelCase = list(_flat_idx_to_idx(flat_end - 1 , SCREAMING_SNAKE_CASE_ ) ) # Get an ordered list of slices to perform UpperCAmelCase = _get_minimal_slice_set( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) UpperCAmelCase = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Callable , SCREAMING_SNAKE_CASE_ : Dict[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Any = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Any: """simple docstring""" if not (len(SCREAMING_SNAKE_CASE_ ) > 0): raise ValueError('''Must provide at least one input''' ) UpperCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(SCREAMING_SNAKE_CASE_ )] UpperCAmelCase = tuple([max(SCREAMING_SNAKE_CASE_ ) for s in zip(*SCREAMING_SNAKE_CASE_ )] ) def _prep_inputs(SCREAMING_SNAKE_CASE_ : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) UpperCAmelCase = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t UpperCAmelCase = tensor_tree_map(_prep_inputs , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = None if _out is not None: UpperCAmelCase = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) UpperCAmelCase = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(SCREAMING_SNAKE_CASE_ : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCAmelCase = 0 UpperCAmelCase = prepped_outputs for _ in range(SCREAMING_SNAKE_CASE_ ): # Chunk the input if not low_mem: UpperCAmelCase = _select_chunk else: UpperCAmelCase = partial( _chunk_slice , flat_start=SCREAMING_SNAKE_CASE_ , flat_end=min(SCREAMING_SNAKE_CASE_ , i + chunk_size ) , no_batch_dims=len(SCREAMING_SNAKE_CASE_ ) , ) UpperCAmelCase = tensor_tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Run the layer on the chunk UpperCAmelCase = layer(**SCREAMING_SNAKE_CASE_ ) # Allocate space for the output if out is None: UpperCAmelCase = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , SCREAMING_SNAKE_CASE_ ) # Put the chunk in its pre-allocated space if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): def assign(SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict ) -> None: for k, v in da.items(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): assign(SCREAMING_SNAKE_CASE_ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += 
da[k] else: UpperCAmelCase = da[k] assign(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for xa, xa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCAmelCase = xa elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCAmelCase = output_chunk else: raise ValueError('''Not supported''' ) i += chunk_size UpperCAmelCase = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.view(orig_batch_dims + t.shape[1:] ) , SCREAMING_SNAKE_CASE_ ) return out class lowerCAmelCase__ : '''simple docstring''' def __init__( self : int , a__ : int = 512 , ): UpperCAmelCase = max_chunk_size UpperCAmelCase = None UpperCAmelCase = None def __snake_case ( self : Any , a__ : Callable , a__ : tuple , a__ : int ): logging.info('''Tuning chunk size...''' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] UpperCAmelCase = [c for c in candidates if c > min_chunk_size] UpperCAmelCase = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(a__ : int ) -> bool: try: with torch.no_grad(): fn(*a__ , chunk_size=a__ ) return True except RuntimeError: return False UpperCAmelCase = 0 UpperCAmelCase = len(a__ ) - 1 while i > min_viable_chunk_size_index: UpperCAmelCase = test_chunk_size(candidates[i] ) if not viable: UpperCAmelCase = (min_viable_chunk_size_index + i) // 2 else: UpperCAmelCase = i UpperCAmelCase = (i + len(a__ ) - 1) // 2 return candidates[min_viable_chunk_size_index] def __snake_case ( self : List[Any] , a__ : Iterable , a__ : Iterable ): UpperCAmelCase = True for aa, aa in zip(a__ , a__ ): assert type(a__ ) == type(a__ ) if isinstance(a__ , (list, tuple) ): consistent &= self._compare_arg_caches(a__ , a__ ) elif isinstance(a__ , a__ ): UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda a__ : x[0] )] UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda a__ : x[0] )] consistent &= self._compare_arg_caches(a__ , a__ ) else: consistent &= aa == aa return consistent def __snake_case ( self : Any , a__ : Callable , a__ : tuple , a__ : int , ): UpperCAmelCase = True UpperCAmelCase = tree_map(lambda a__ : a.shape if isinstance(a__ , torch.Tensor ) else a , a__ , a__ ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(a__ ) UpperCAmelCase = self._compare_arg_caches(self.cached_arg_data , a__ ) else: # Otherwise, we can reuse the precomputed value UpperCAmelCase = False if not consistent: UpperCAmelCase = self._determine_favorable_chunk_size( a__ , a__ , a__ , ) UpperCAmelCase = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
51
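The large function above is the OpenFold-derived `chunk_layer`: it flattens the leading batch dimensions, runs the layer over fixed-size slices, and writes each slice into a pre-allocated output, bounding peak memory. The calling convention, sketched with the upstream names (the flattened row renames them):

import torch

def layer(x: torch.Tensor) -> torch.Tensor:
    return x * 2  # stand-in for an expensive module

inputs = {"x": torch.randn(8, 16, 32)}  # two batch dims: (8, 16)
out = chunk_layer(layer, inputs, chunk_size=4, no_batch_dims=2)
assert out.shape == (8, 16, 32)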
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
51
1
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9930},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9430},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9430},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
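# A hedged sketch of the pipeline usage the tests above verify. Assumptions:
# network access to the COCO image URL and to the facebook/sam-vit-huge
# checkpoint; points_per_batch only trades memory for speed.
from transformers import pipeline

mask_generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = mask_generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
)
# outputs["masks"] is a list of binary masks and outputs["scores"] the matching
# predicted IoU-style score per mask, exactly as consumed by the assertions above.
masks, scores = outputs["masks"], outputs["scores"]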
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a fairseq OPT checkpoint and normalize its state-dict keys."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split the fused QKV tensor into separate Q, K, V tensors
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight as K, V, Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy the fairseq weights into a HuggingFace OPTModel and save it."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
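# A hedged example of driving the converter from Python rather than the CLI.
# The checkpoint path and output folder below are placeholders, and reusing the
# facebook/opt-125m config is an assumption; only convert_opt_checkpoint's
# signature comes from the script above.
convert_opt_checkpoint(
    "restored.pt",                # hypothetical fairseq checkpoint in the expected format
    "converted/opt-125m",         # folder that save_pretrained will populate
    config="facebook/opt-125m",   # optional: start from a known OPTConfig
)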
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
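# A small self-contained sketch of the processor defined above, feeding it
# random uint8 frames in place of a real video. The class name
# VideoImageProcessor is the one restored in this snippet (the original
# identifier was obfuscated), and the printed shape is the expected output.
import numpy as np

processor = VideoImageProcessor()
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
batch = processor(video, return_tensors="np")
# One video of 8 frames, resized/cropped to 224x224, channels first:
print(batch["pixel_values"].shape)  # expected (1, 8, 3, 224, 224)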
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import torch

from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Compute start/end token scores for the query against the supports."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Sort by the supplied key (e.g. value/weight ratio), best first, then take
    # every item that still fits under the cost budget.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Doctests for build_menu and greedy would live here."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
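# Worked example for the greedy knapsack above: build a menu and pick items by
# value density until the weight budget is exhausted. The item names and
# numbers are invented purely for illustration.
names = ["Burger", "Pizza", "Coca Cola", "Rice"]
values = [80, 100, 60, 70]
weights = [40, 60, 40, 70]

menu = build_menu(names, values, weights)
# Things.value_weight ranks items by value/weight ratio; budget of 100 picks
# Burger (ratio 2.0) and Pizza (ratio ~1.67) for a total value of 180.0.
taken, total_value = greedy(menu, 100, Things.value_weight)
print(taken, total_value)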
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
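# A minimal sketch of the denoising loop the tests above verify, with a dummy
# zero "model" standing in for a real UNet; only the scheduler API usage
# (scale_model_input -> model -> step) is the point here.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # dummy model output
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample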