Dataset schema (column name, dtype, and min/max from the viewer header):

  code                     string  (lengths 87 - 55.2k)
  code_codestyle           int64   (0 - 349)
  style_context            string  (lengths 135 - 49.1k)
  style_context_codestyle  int64   (0 - 349)
  label                    int64   (0 - 1)

Every row shown below has code_codestyle = style_context_codestyle = 336 and label = 1.
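A minimal sketch of loading rows with this schema via the `datasets` library; the dataset id used here is a placeholder, since the dump does not name the dataset:

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical id; the dump does not name the dataset.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])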
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


_lowerCamelCase : Tuple = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Dict = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : Dict = patch_size UpperCAmelCase : int = num_channels UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : Union[str, Any] = use_labels UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Tuple = mask_ratio UpperCAmelCase : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCAmelCase : Tuple = (image_size // patch_size) ** 2 UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : str = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Optional[Any] ): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ): UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A ) UpperCAmelCase : Tuple = model(__A, training=__A ) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ): UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A ) UpperCAmelCase : int = model(__A, training=__A ) # expected sequence length = num_patches UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2 UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCAmelCase : Tuple = 1 UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A ) UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = model(__A, training=__A ) UpperCAmelCase : Union[str, Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Dict = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = TFViTMAEModelTester(self ) UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[str] = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) ) UpperCAmelCase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) ) def __magic_name__ ( self : str ): UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(__A ) UpperCAmelCase : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : int = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__A ) def __magic_name__ ( self : int ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) 
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : str = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : Dict = model(__A, noise=__A ) UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A ) UpperCAmelCase : Dict = outputs_dict[0].numpy() UpperCAmelCase : Tuple = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 ) def __magic_name__ ( self : Optional[Any] ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__A : Union[str, Any] ): UpperCAmelCase : str = {} for k, v in inputs_dict.items(): if tf.is_tensor(__A ): UpperCAmelCase : Tuple = v.numpy() else: UpperCAmelCase : str = np.array(__A ) return inputs_np_dict for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : Any = self._prepare_for_class(__A, __A ) UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A ) UpperCAmelCase : str = model(__A, noise=__A ) UpperCAmelCase : str = model(**__A, noise=__A ) self.assert_outputs_same(__A, __A ) def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ): # make masks reproducible np.random.seed(2 ) UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : int = tf.constant(__A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCAmelCase : List[Any] = tf_noise super().check_pt_tf_models(__A, __A, __A ) def __magic_name__ ( self : str ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Union[str, Any] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__A ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(__A, __A ),) if isinstance(__A, __A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__A, '''_keras_serializable''', __A ) } UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : str = tf.convert_to_tensor(__A ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: UpperCAmelCase : Tuple = main_layer_class(__A ) UpperCAmelCase : int = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) ) UpperCAmelCase : List[Any] = model(__A ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' ) model.save(__A ) UpperCAmelCase : List[str] = tf.keras.models.load_model( __A, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__A, tf.keras.Model ) UpperCAmelCase : Tuple = model(__A ) self.assert_outputs_same(__A, __A ) @slow def __magic_name__ ( self : Dict ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(__A ) UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A ) UpperCAmelCase : Union[str, Any] = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy() UpperCAmelCase : Union[str, Any] = 0 else: UpperCAmelCase : Optional[int] = outputs.logits.numpy() UpperCAmelCase : int = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A, saved_model=__A ) UpperCAmelCase : Dict = model_class.from_pretrained(__A ) UpperCAmelCase : str = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy() UpperCAmelCase : Dict = 0 else: UpperCAmelCase : Any = after_outputs['''logits'''].numpy() UpperCAmelCase : Dict = 0 UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__A, 1E-5 ) def __magic_name__ ( self : Optional[Any] ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : List[Any] = model(__A, noise=__A ) UpperCAmelCase : str = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(__A ) UpperCAmelCase : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCAmelCase : str = model_class.from_config(model.config ) UpperCAmelCase : List[str] = new_model(__A ) # Build model 
new_model.set_weights(model.get_weights() ) UpperCAmelCase : Tuple = new_model(__A, noise=__A ) self.assert_outputs_same(__A, __A ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def __magic_name__ ( self : Tuple ): pass @slow def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__A ) def a__ ( ) -> Dict: UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[str] ): return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def __magic_name__ ( self : str ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) UpperCAmelCase : List[str] = self.default_image_processor UpperCAmelCase : Any = prepare_img() UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCAmelCase : Optional[int] = ViTMAEConfig() UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCAmelCase : Optional[int] = model(**__A, noise=__A ) # verify the logits UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : List[str] = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
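For reference, the sequence-length arithmetic the ViTMAE tester above encodes, restated with its own defaults (image_size=30, patch_size=2, mask_ratio=0.6); nothing beyond the row's formula is assumed:

import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                      # 15 ** 2 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226) = 91
print(num_patches, seq_length)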
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple=False ) -> List[Any]: try: UpperCAmelCase : Tuple = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCAmelCase : Optional[Any] = default else: # KEY is set, convert it to True or False. try: UpperCAmelCase : Optional[Any] = strtobool(UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''' ) return _value _lowerCamelCase : List[str] = parse_flag_from_env("RUN_SLOW", default=False) _lowerCamelCase : str = parse_flag_from_env("RUN_REMOTE", default=False) _lowerCamelCase : int = parse_flag_from_env("RUN_LOCAL", default=True) _lowerCamelCase : Optional[Any] = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression _lowerCamelCase : Union[str, Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") _lowerCamelCase : Union[str, Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") _lowerCamelCase : int = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio _lowerCamelCase : Tuple = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam _lowerCamelCase : List[str] = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility _lowerCamelCase : Optional[Any] = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows _lowerCamelCase : Optional[Any] = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def a__ ( UpperCAmelCase : Tuple ) -> List[Any]: try: import faiss # noqa except ImportError: UpperCAmelCase : List[str] = unittest.skip('''test requires faiss''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : Optional[Any] ) -> Any: try: import regex # noqa except ImportError: UpperCAmelCase : List[str] = unittest.skip('''test requires regex''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : int ) -> List[Any]: try: import elasticsearch # noqa except ImportError: UpperCAmelCase : Optional[Any] = unittest.skip('''test requires elasticsearch''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : int ) -> Any: try: import sqlalchemy # noqa except ImportError: UpperCAmelCase : List[Any] = unittest.skip('''test requires sqlalchemy''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : Optional[int] ) -> Any: if not config.TORCH_AVAILABLE: UpperCAmelCase : int = unittest.skip('''test requires PyTorch''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : 
Optional[int] ) -> int: if not config.TF_AVAILABLE: UpperCAmelCase : Optional[Any] = unittest.skip('''test requires TensorFlow''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : Dict ) -> Any: if not config.JAX_AVAILABLE: UpperCAmelCase : Optional[int] = unittest.skip('''test requires JAX''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : Tuple ) -> List[Any]: if not config.PIL_AVAILABLE: UpperCAmelCase : Dict = unittest.skip('''test requires Pillow''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any: try: import transformers # noqa F401 except ImportError: return unittest.skip('''test requires transformers''' )(UpperCAmelCase ) else: return test_case def a__ ( UpperCAmelCase : Optional[int] ) -> str: try: import tiktoken # noqa F401 except ImportError: return unittest.skip('''test requires tiktoken''' )(UpperCAmelCase ) else: return test_case def a__ ( UpperCAmelCase : Optional[Any] ) -> Dict: try: import spacy # noqa F401 except ImportError: return unittest.skip('''test requires spacy''' )(UpperCAmelCase ) else: return test_case def a__ ( UpperCAmelCase : Any ) -> Optional[Any]: def _require_spacy_model(UpperCAmelCase : List[str] ): try: import spacy # noqa F401 spacy.load(UpperCAmelCase ) except ImportError: return unittest.skip('''test requires spacy''' )(UpperCAmelCase ) except OSError: return unittest.skip('''test requires spacy model \'{}\''''.format(UpperCAmelCase ) )(UpperCAmelCase ) else: return test_case return _require_spacy_model def a__ ( UpperCAmelCase : List[Any] ) -> Optional[Any]: try: import pyspark # noqa F401 except ImportError: return unittest.skip('''test requires pyspark''' )(UpperCAmelCase ) else: return test_case def a__ ( UpperCAmelCase : Optional[int] ) -> Optional[int]: try: import joblibspark # noqa F401 except ImportError: return unittest.skip('''test requires joblibspark''' )(UpperCAmelCase ) else: return test_case def a__ ( UpperCAmelCase : List[Any] ) -> Any: if not _run_slow_tests or _run_slow_tests == 0: UpperCAmelCase : List[Any] = unittest.skip('''test is slow''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : Dict ) -> List[Any]: if not _run_local_tests or _run_local_tests == 0: UpperCAmelCase : Union[str, Any] = unittest.skip('''test is local''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : int ) -> Tuple: if not _run_packaged_tests or _run_packaged_tests == 0: UpperCAmelCase : Any = unittest.skip('''test is packaged''' )(UpperCAmelCase ) return test_case def a__ ( UpperCAmelCase : Optional[int] ) -> Tuple: if not _run_remote_tests or _run_remote_tests == 0: UpperCAmelCase : List[str] = unittest.skip('''test requires remote''' )(UpperCAmelCase ) return test_case def a__ ( *UpperCAmelCase : str ) -> Tuple: def decorate(cls : List[Any] ): for name, fn in cls.__dict__.items(): if callable(UpperCAmelCase ) and name.startswith('''test''' ): for decorator in decorators: UpperCAmelCase : List[Any] = decorator(UpperCAmelCase ) setattr(cls , UpperCAmelCase , UpperCAmelCase ) return cls return decorate class __UpperCAmelCase ( lowerCamelCase__ ): pass class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = 0 UpperCamelCase = 1 UpperCamelCase = 2 @contextmanager def a__ ( UpperCAmelCase : List[Any]=OfflineSimulationMode.CONNECTION_FAILS , UpperCAmelCase : int=1E-16 ) -> Any: UpperCAmelCase : Tuple = requests.Session().request def timeout_request(UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , **UpperCAmelCase : Optional[int] 
): # Change the url to an invalid url so that the connection hangs UpperCAmelCase : Tuple = '''https://10.255.255.1''' if kwargs.get('''timeout''' ) is None: raise RequestWouldHangIndefinitelyError( f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) UpperCAmelCase : Union[str, Any] = timeout try: return online_request(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier UpperCAmelCase : int = url UpperCAmelCase : Optional[Any] = e.args[0] UpperCAmelCase : Tuple = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),) UpperCAmelCase : int = (max_retry_error,) raise def raise_connection_error(UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ): raise requests.ConnectionError('''Offline mode is enabled.''' , request=UpperCAmelCase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('''requests.Session.send''' , UpperCAmelCase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('''requests.Session.request''' , UpperCAmelCase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('''datasets.config.HF_DATASETS_OFFLINE''' , UpperCAmelCase ): yield else: raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' ) @contextmanager def a__ ( *UpperCAmelCase : Any , **UpperCAmelCase : Optional[int] ) -> int: UpperCAmelCase : Optional[int] = str(Path().resolve() ) with tempfile.TemporaryDirectory(*UpperCAmelCase , **UpperCAmelCase ) as tmp_dir: try: os.chdir(UpperCAmelCase ) yield finally: os.chdir(UpperCAmelCase ) @contextmanager def a__ ( ) -> List[str]: import gc gc.collect() UpperCAmelCase : str = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def a__ ( ) -> Union[str, Any]: import gc gc.collect() UpperCAmelCase : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def a__ ( UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: return deepcopy(UpperCAmelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(UpperCAmelCase ).integers(0 , 100 , 10 ).tolist() def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: import decorator from requests.exceptions import HTTPError def _wrapper(UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Any ): try: return func(*UpperCAmelCase , **UpperCAmelCase ) except HTTPError as err: if str(UpperCAmelCase ).startswith('''500''' ) or str(UpperCAmelCase ).startswith('''502''' ): pytest.xfail(str(UpperCAmelCase ) ) raise err return decorator.decorator(_wrapper , UpperCAmelCase ) class __UpperCAmelCase : def __init__( self : Optional[int], __A : Tuple, __A : List[str], __A : Union[str, Any] ): UpperCAmelCase : Dict = returncode UpperCAmelCase : int = stdout UpperCAmelCase : Any = stderr async def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : List[Any] ) -> str: while True: UpperCAmelCase : List[Any] = await stream.readline() if line: callback(UpperCAmelCase ) else: break async def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Tuple=False ) -> _RunOutput: if echo: print('''\nRunning: ''' , ''' '''.join(UpperCAmelCase ) ) UpperCAmelCase : Union[str, Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCAmelCase : Optional[Any] = [] UpperCAmelCase : Union[str, Any] = [] def tee(UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]="" ): UpperCAmelCase : str = line.decode('''utf-8''' ).rstrip() sink.append(UpperCAmelCase ) if not quiet: print(UpperCAmelCase , UpperCAmelCase , file=UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda UpperCAmelCase : tee(UpperCAmelCase , UpperCAmelCase , sys.stdout , label='''stdout:''' ) ), _read_stream(p.stderr , lambda UpperCAmelCase : tee(UpperCAmelCase , UpperCAmelCase , sys.stderr , label='''stderr:''' ) ), ] , timeout=UpperCAmelCase , ) return _RunOutput(await p.wait() , UpperCAmelCase , UpperCAmelCase ) def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[Any]=180 , UpperCAmelCase : List[str]=False , UpperCAmelCase : str=True ) -> _RunOutput: UpperCAmelCase : List[str] = asyncio.get_event_loop() UpperCAmelCase : Union[str, Any] = loop.run_until_complete( _stream_subprocess(UpperCAmelCase , env=UpperCAmelCase , stdin=UpperCAmelCase , timeout=UpperCAmelCase , quiet=UpperCAmelCase , echo=UpperCAmelCase ) ) UpperCAmelCase : Dict = ''' '''.join(UpperCAmelCase ) if result.returncode > 0: UpperCAmelCase : Optional[Any] = '''\n'''.join(result.stderr ) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' ) return result def a__ ( ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' ) UpperCAmelCase : Union[str, Any] = re.sub(r'''^gw''' , '''''' , UpperCAmelCase , 0 , re.M ) return int(UpperCAmelCase ) def a__ ( ) -> int: UpperCAmelCase : List[str] = 29_500 UpperCAmelCase : List[str] = pytest_xdist_worker_id() return port + uniq_delta
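The `parse_flag_from_env` helper at the top of this row drives the RUN_SLOW/RUN_REMOTE/RUN_LOCAL/RUN_PACKAGED flags through `distutils.util.strtobool`; for reference, the values it accepts:

from distutils.util import strtobool

# strtobool maps y/yes/t/true/on/1 -> 1 and n/no/f/false/off/0 -> 0 (case-insensitive).
assert strtobool("yes") == 1
assert strtobool("OFF") == 0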
def a__ ( UpperCAmelCase : int ) -> int:
    UpperCAmelCase : list[list[int]] = [[0 for _ in range(UpperCAmelCase )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        UpperCAmelCase : Optional[Any] = 1
    for n in range(m + 1 ):
        for k in range(1 , UpperCAmelCase ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            _lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            _lowerCamelCase : str = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
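The row above is the obfuscated form of a partition-counting DP. A readable re-statement, with the `partition` and `memo` names restored by assumption (the row's `__main__` block still calls `partition(n)`); here `memo[n][k]` counts partitions of n into parts of size at most k + 1:

def partition(m: int) -> int:
    # Names restored by assumption; the logic mirrors the obfuscated row above.
    memo = [[0] * m for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1  # exactly one partition of i using only 1s
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]          # largest part <= k
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]  # use at least one part of size k + 1
    return memo[m][m - 1]

assert partition(5) == 7  # 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1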
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __UpperCAmelCase : UpperCamelCase = BlenderbotConfig UpperCamelCase = {} UpperCamelCase = """gelu""" def __init__( self : Tuple, __A : Optional[Any], __A : Dict=1_3, __A : List[str]=7, __A : Optional[int]=True, __A : List[Any]=False, __A : Union[str, Any]=9_9, __A : int=3_2, __A : Union[str, Any]=2, __A : Optional[int]=4, __A : int=3_7, __A : List[Any]=0.1, __A : Dict=0.1, __A : Union[str, Any]=2_0, __A : Optional[Any]=2, __A : List[Any]=1, __A : Union[str, Any]=0, ): UpperCAmelCase : str = parent UpperCAmelCase : Optional[Any] = batch_size UpperCAmelCase : Dict = seq_length UpperCAmelCase : str = is_training UpperCAmelCase : Optional[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Any = hidden_size UpperCAmelCase : List[Any] = num_hidden_layers UpperCAmelCase : str = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Optional[Any] = hidden_dropout_prob UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase : Dict = max_position_embeddings UpperCAmelCase : Any = eos_token_id UpperCAmelCase : Union[str, Any] = pad_token_id UpperCAmelCase : List[str] = bos_token_id def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) UpperCAmelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) UpperCAmelCase : Any = tf.concat([input_ids, eos_tensor], axis=1 ) UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : int = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) UpperCAmelCase : List[Any] = prepare_blenderbot_inputs_dict(__A, __A, __A ) return config, inputs_dict def __magic_name__ ( self : Tuple, __A : int, __A : List[str] ): UpperCAmelCase : Union[str, Any] = TFBlenderbotModel(config=__A ).get_decoder() UpperCAmelCase : Union[str, Any] = inputs_dict['''input_ids'''] UpperCAmelCase : List[str] = input_ids[:1, :] UpperCAmelCase : Any = inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase : Optional[Any] = inputs_dict['''head_mask'''] UpperCAmelCase : Tuple = 1 # first forward pass UpperCAmelCase : Optional[int] = model(__A, attention_mask=__A, head_mask=__A, use_cache=__A ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = outputs.to_tuple() # create hypothetical next token and extent to 
next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3), config.vocab_size ) UpperCAmelCase : List[Any] = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and UpperCAmelCase : List[str] = tf.concat([input_ids, next_tokens], axis=-1 ) UpperCAmelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 ) UpperCAmelCase : List[Any] = model(__A, attention_mask=__A )[0] UpperCAmelCase : str = model(__A, attention_mask=__A, past_key_values=__A )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice UpperCAmelCase : int = int(ids_tensor((1,), output_from_past.shape[-1] ) ) UpperCAmelCase : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__A, __A, rtol=1E-3 ) def a__ ( UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[int]=None , ) -> Any: if attention_mask is None: UpperCAmelCase : Optional[int] = tf.cast(tf.math.not_equal(UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase : Tuple = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () UpperCamelCase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { """conversational""": TFBlenderbotForConditionalGeneration, """feature-extraction""": TFBlenderbotModel, """summarization""": TFBlenderbotForConditionalGeneration, """text2text-generation""": TFBlenderbotForConditionalGeneration, """translation""": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Any ): UpperCAmelCase : Tuple = TFBlenderbotModelTester(self ) UpperCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__A ) def __magic_name__ ( self : Optional[Any] ): self.config_tester.run_common_tests() def __magic_name__ ( self : List[str] ): UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__A ) @require_tokenizers @require_tf class __UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = ["""My friends are cool but they eat too many carbs."""] UpperCamelCase = 
"""facebook/blenderbot-400M-distill""" @cached_property def __magic_name__ ( self : str ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def __magic_name__ ( self : Dict ): UpperCAmelCase : int = self.tokenizer(self.src_text, return_tensors='''tf''' ) UpperCAmelCase : str = self.model.generate( model_inputs.input_ids, ) UpperCAmelCase : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=__A )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
from __future__ import annotations


def a__ ( UpperCAmelCase : list[list[int]] ) -> bool:
    UpperCAmelCase : Union[str, Any] = len(UpperCAmelCase )
    # We need to create solution object to save path.
    UpperCAmelCase : int = [[0 for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )]
    UpperCAmelCase : Union[str, Any] = run_maze(UpperCAmelCase , 0 , 0 , UpperCAmelCase )
    if solved:
        print('''\n'''.join(str(UpperCAmelCase ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved


def a__ ( UpperCAmelCase : list[list[int]] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[int]] ) -> bool:
    UpperCAmelCase : Dict = len(UpperCAmelCase )
    # Final check point.
    if i == j == (size - 1):
        UpperCAmelCase : Dict = 1
        return True

    UpperCAmelCase : Union[str, Any] = (not i < 0) and (not j < 0)  # Check lower bounds
    UpperCAmelCase : List[Any] = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        UpperCAmelCase : Any = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            UpperCAmelCase : str = 1

            # check for directions
            if (
                run_maze(UpperCAmelCase , i + 1 , UpperCAmelCase , UpperCAmelCase )
                or run_maze(UpperCAmelCase , UpperCAmelCase , j + 1 , UpperCAmelCase )
                or run_maze(UpperCAmelCase , i - 1 , UpperCAmelCase , UpperCAmelCase )
                or run_maze(UpperCAmelCase , UpperCAmelCase , j - 1 , UpperCAmelCase )
            ):
                return True

            UpperCAmelCase : Any = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
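A hypothetical call, assuming the two `a__` definitions are restored to the `solve_maze` / `run_maze` names the first one still references internally (0 marks an open cell, 1 a wall):

# Hypothetical usage under the assumed de-obfuscated names.
maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 1, 0],
]
solve_maze(maze)  # prints the 0/1 path matrix from (0, 0) to (2, 2) if one exists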
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(UpperCAmelCase )
    )


def a__ ( UpperCAmelCase : list[list[int]] , UpperCAmelCase : int , UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> bool:
    # Base Case
    if index == len(UpperCAmelCase ):
        return True

    # Recursive Step
    for i in range(UpperCAmelCase ):
        if valid_coloring(graph[index] , UpperCAmelCase , UpperCAmelCase ):
            # Color current vertex
            UpperCAmelCase : List[str] = i
            # Validate coloring
            if util_color(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , index + 1 ):
                return True
            # Backtrack
            UpperCAmelCase : Any = -1
    return False


def a__ ( UpperCAmelCase : list[list[int]] , UpperCAmelCase : int ) -> list[int]:
    UpperCAmelCase : Optional[Any] = [-1] * len(UpperCAmelCase )
    if util_color(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 0 ):
        return colored_vertices
    return []
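A hypothetical call, assuming the entry point is restored to `color` and its helpers to the `valid_coloring` / `util_color` names the row still references:

# Adjacency matrix of a triangle: 3 colors suffice, 2 cannot.
graph = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
print(color(graph, 3))  # e.g. [0, 1, 2]
print(color(graph, 2))  # [] - no valid 2-coloring of a triangle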
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __UpperCAmelCase : def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Any = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : List[str] = patch_size UpperCAmelCase : Dict = num_channels UpperCAmelCase : List[Any] = is_training UpperCAmelCase : Dict = use_labels UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : Union[str, Any] = num_hidden_layers UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Any = hidden_act UpperCAmelCase : Any = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : str = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : int = scope UpperCAmelCase : List[str] = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size UpperCAmelCase : str = (self.image_size // 3_2) ** 2 UpperCAmelCase : List[str] = num_patches + 1 def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : str = None if self.use_labels: UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Any ): UpperCAmelCase : Dict = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 1_6, 3_2], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, 
backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, ) def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ): UpperCAmelCase : int = ViTHybridModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ): UpperCAmelCase : str = self.type_sequence_label_size UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self : int ): UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs UpperCAmelCase : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () UpperCamelCase = ( {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Any = ViTHybridModelTester(self ) UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : int ): UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) UpperCAmelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, nn.Linear ) ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = model_class(__A ) UpperCAmelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : str = [*signature.parameters.keys()] UpperCAmelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Dict = _config_zero_init(__A ) for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(config=__A ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == 
"ViTHybridPatchEmbeddings": UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @slow def __magic_name__ ( self : List[str] ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> Tuple: UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : str ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __A ) UpperCAmelCase : Tuple = self.default_image_processor UpperCAmelCase : int = prepare_img() UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(**__A ) # verify the logits UpperCAmelCase : str = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow @require_accelerate def __magic_name__ ( self : Dict ): UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' ) UpperCAmelCase : Tuple = prepare_img() UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' ) UpperCAmelCase : Dict = model(**__A ) UpperCAmelCase : Any = outputs.logits # model predicts one of the 1000 ImageNet classes UpperCAmelCase : Dict = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
_lowerCamelCase : List[str] = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
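The row is `accelerate`'s top-level `__init__`. A minimal sketch of the core API it re-exports; `model`, `optimizer`, and `dataloader` stand in for real PyTorch objects, and the `.loss` access assumes an HF-style model output:

from accelerate import Accelerator

accelerator = Accelerator()
# model/optimizer/dataloader are placeholders for objects built elsewhere.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    optimizer.zero_grad()
    loss = model(**batch).loss
    accelerator.backward(loss)  # replaces loss.backward() in mixed/distributed setups
    optimizer.step()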
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def a__ ( ) -> tuple[list[int], int]:
    UpperCAmelCase : str = [randint(-1_000 , 1_000 ) for i in range(10 )]
    UpperCAmelCase : Any = randint(-5_000 , 5_000 )
    return (arr, r)


_lowerCamelCase : Any = make_dataset()


def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, ...]:
    for triplet in permutations(UpperCAmelCase , 3 ):
        if sum(UpperCAmelCase ) == target:
            return tuple(sorted(UpperCAmelCase ) )
    return (0, 0, 0)


def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, int, int]:
    arr.sort()
    UpperCAmelCase : Tuple = len(UpperCAmelCase )
    for i in range(n - 1 ):
        UpperCAmelCase , UpperCAmelCase : int = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def a__ ( ) -> tuple[float, float]:
    UpperCAmelCase : Union[str, Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    UpperCAmelCase : Tuple = '''
triplet_sum1(*dataset)
'''
    UpperCAmelCase : List[str] = '''
triplet_sum2(*dataset)
'''
    UpperCAmelCase : Tuple = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
    UpperCAmelCase : str = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
    return (min(UpperCAmelCase ), min(UpperCAmelCase ))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    _lowerCamelCase : int = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
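A worked call for the two-pointer variant, using the `triplet_sum2` name the row's timing strings still reference:

assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)  # sorted: 5 + 7 + 23 = 35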
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class __UpperCAmelCase : def __magic_name__ ( self : int, __A : Dict ): raise NotImplementedError() def __magic_name__ ( self : int ): raise NotImplementedError() class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ): UpperCAmelCase : List[str] = tokenizer UpperCAmelCase : str = skip_prompt UpperCAmelCase : List[str] = decode_kwargs # variables used in the streaming process UpperCAmelCase : Dict = [] UpperCAmelCase : List[str] = 0 UpperCAmelCase : Union[str, Any] = True def __magic_name__ ( self : Dict, __A : Optional[int] ): if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: UpperCAmelCase : Union[str, Any] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: UpperCAmelCase : Optional[int] = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): UpperCAmelCase : Union[str, Any] = text[self.print_len :] UpperCAmelCase : int = [] UpperCAmelCase : int = 0 # If the last token is a CJK character, we print the characters. elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ): UpperCAmelCase : Union[str, Any] = text[self.print_len :] self.print_len += len(__A ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(__A ) self.on_finalized_text(__A ) def __magic_name__ ( self : str ): # Flush the cache, if it exists if len(self.token_cache ) > 0: UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs ) UpperCAmelCase : Dict = text[self.print_len :] UpperCAmelCase : List[Any] = [] UpperCAmelCase : List[Any] = 0 else: UpperCAmelCase : Dict = '''''' UpperCAmelCase : str = True self.on_finalized_text(__A, stream_end=__A ) def __magic_name__ ( self : List[str], __A : str, __A : bool = False ): print(__A, flush=__A, end='''''' if not stream_end else None ) def __magic_name__ ( self : List[Any], __A : Optional[int] ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0X4E00 and cp <= 0X9FFF) or (cp >= 0X3400 and cp <= 0X4DBF) # or (cp >= 0X20000 and cp <= 0X2A6DF) # or (cp >= 0X2A700 and cp <= 0X2B73F) # or (cp >= 0X2B740 and cp <= 0X2B81F) # or (cp >= 0X2B820 and cp <= 0X2CEAF) # or (cp >= 0XF900 and cp <= 0XFAFF) or (cp >= 0X2F800 and cp <= 0X2FA1F) # ): # return True return False class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ): super().__init__(__A, __A, **__A ) UpperCAmelCase : Dict = Queue() UpperCAmelCase : Any = None UpperCAmelCase : Any = timeout def __magic_name__ ( self : Dict, __A : str, __A : bool = False ): self.text_queue.put(__A, timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal, timeout=self.timeout ) def __iter__( self : int ): return self def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
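# Usage sketch for the iterator streamer above: a minimal example assuming a
# transformers version where `generate` accepts the `streamer` keyword. The
# model choice and generation settings are illustrative.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=20)

# generate() blocks, so it runs in a worker thread while the main thread
# consumes decoded text chunks as they become available.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
for new_text in streamer:
    print(new_text, end="")
thread.join()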
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowerCamelCase : int = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(UpperCAmelCase , np.ndarray ): return list(tensor.shape ) UpperCAmelCase : int = tf.shape(UpperCAmelCase ) if tensor.shape == tf.TensorShape(UpperCAmelCase ): return dynamic UpperCAmelCase : Optional[Any] = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase )] def a__ ( UpperCAmelCase : tf.Tensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase , name=UpperCAmelCase ) def a__ ( UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=1E-5 , UpperCAmelCase : int=-1 ) -> Optional[int]: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase , UpperCAmelCase ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized UpperCAmelCase , UpperCAmelCase : Any = tf.nn.moments(UpperCAmelCase , axes=[axis] , keepdims=UpperCAmelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis UpperCAmelCase : Any = [1] * inputs.shape.rank UpperCAmelCase : List[Any] = shape_list(UpperCAmelCase )[axis] UpperCAmelCase : int = tf.reshape(UpperCAmelCase , UpperCAmelCase ) UpperCAmelCase : Optional[int] = tf.reshape(UpperCAmelCase , UpperCAmelCase ) # Compute layer normalization using the batch_normalization # function. UpperCAmelCase : List[Any] = tf.nn.batch_normalization( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , offset=UpperCAmelCase , scale=UpperCAmelCase , variance_epsilon=UpperCAmelCase , ) return outputs def a__ ( UpperCAmelCase : int , UpperCAmelCase : List[str]=0 , UpperCAmelCase : List[Any]=-1 ) -> Union[str, Any]: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input UpperCAmelCase : List[str] = tf.shape(UpperCAmelCase ) UpperCAmelCase : Tuple = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) UpperCAmelCase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(UpperCAmelCase , UpperCAmelCase ) def a__ ( UpperCAmelCase : tf.Tensor ) -> tf.Tensor: if not isinstance(UpperCAmelCase , tf.Tensor ): UpperCAmelCase : Any = tf.convert_to_tensor(UpperCAmelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: UpperCAmelCase : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: UpperCAmelCase : Dict = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) UpperCAmelCase : Dict = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def a__ ( UpperCAmelCase : tf.Tensor , UpperCAmelCase : int , UpperCAmelCase : str = "input_ids" ) -> None: tf.debugging.assert_less( UpperCAmelCase , tf.cast(UpperCAmelCase , dtype=tensor.dtype ) , message=( f'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase )}) must be smaller than the embedding ''' f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple ) -> List[Any]: UpperCAmelCase : Optional[Any] = 64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. UpperCAmelCase : Optional[int] = [x for x in data if len(UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' f'''bytes: {bad_attributes}''' ) UpperCAmelCase : str = np.asarray(UpperCAmelCase ) UpperCAmelCase : Union[str, Any] = 1 UpperCAmelCase : Any = np.array_split(UpperCAmelCase , UpperCAmelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 UpperCAmelCase : Optional[Any] = np.array_split(UpperCAmelCase , UpperCAmelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(UpperCAmelCase ): UpperCAmelCase : Tuple = chunk_data else: UpperCAmelCase : Dict = data def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Dict ) -> List[Any]: if name in group.attrs: UpperCAmelCase : Tuple = [n.decode('''utf8''' ) if hasattr(UpperCAmelCase , '''decode''' ) else n for n in group.attrs[name]] else: UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(UpperCAmelCase , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def a__ ( UpperCAmelCase : Dict ) -> List[Any]: def _expand_single_ad_tensor(UpperCAmelCase : Any ): if isinstance(UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(UpperCAmelCase , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase )
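# Usage sketch for shape_list above, assuming it is in scope from this module.
# Under tf.function, some dimensions are only known at run time; shape_list
# returns static Python ints where possible and scalar tensors elsewhere, so
# the mixed result can be fed straight back into a reshape.
import tensorflow as tf


@tf.function(input_signature=[tf.TensorSpec(shape=[None, None, 64], dtype=tf.float32)])
def collapse_batch_and_time(x):
    batch, seq, hidden = shape_list(x)  # batch/seq are scalar tensors, hidden == 64
    return tf.reshape(x, [batch * seq, hidden])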
"""
Gradient descent for minimizing the cost of a linear hypothesis function.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """h(x) = theta_0 + theta_1 * x_1 + ... + theta_n * x_n"""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output for the given example in the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example in the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of errors (index == -1, bias term) or errors times the feature."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Mean cost derivative for the parameter at position index + 1."""
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
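# The loop above implements batch gradient descent on the mean-squared-error
# cost J(theta) = (1/2m) * sum_i (h(x_i) - y_i)^2, whose partial derivative is
#     dJ/dtheta_j = (1/m) * sum_i (h(x_i) - y_i) * x_ij   (with x_i0 == 1 for the bias)
# so every parameter is updated simultaneously as
#     theta_j <- theta_j - LEARNING_RATE * dJ/dtheta_j
# A single hand-checked step with parameter_vector == [0, 0, 0, 0] gives
# h(x_i) == 0 for every example, hence dJ/dtheta_0 == -mean(y), and the bias
# term moves toward the mean of the training outputs.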
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
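# Behavior sketch (usage names illustrative): with the lazy structure above,
# importing the package is cheap, and the torch-backed module is loaded only
# on first attribute access:
#
#     from transformers.models import pegasus_x          # does not import torch yet
#     cls = pegasus_x.PegasusXForConditionalGeneration   # triggers the real import
#
# The TYPE_CHECKING branch gives static type checkers the same names eagerly,
# without paying the import cost at run time.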
"""
Pure Python implementation of the interpolation search algorithm.
"""


def interpolation_search(sorted_collection, item):
    """Iterative interpolation search; returns the index of item or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search; returns the index of item or None."""
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if the collection is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
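# Usage sketch: interpolation search assumes roughly uniformly distributed
# values; the probe index is the linear interpolation of `item` between the
# endpoints. For [10, 30, 40, 45, 50, 66, 77, 93] and item == 67:
#     point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4
# so the first probe lands on index 4 (value 50) instead of the midpoint.
#
#     assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 77) == 6
#     assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 11) is None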
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : str, __A : int, __A : List[Any], __A : Tuple ): UpperCAmelCase : Any = dataset UpperCAmelCase : Any = process UpperCAmelCase : Union[str, Any] = params def __len__( self : int ): return len(self.dataset ) def __getitem__( self : List[Any], __A : Tuple ): UpperCAmelCase : Union[str, Any] = self.dataset[i] UpperCAmelCase : Optional[Any] = self.process(__A, **self.params ) return processed class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Any, __A : int, __A : List[str], __A : Optional[int], __A : Union[str, Any]=None ): UpperCAmelCase : Any = loader UpperCAmelCase : str = infer UpperCAmelCase : Dict = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether UpperCAmelCase : List[Any] = None UpperCAmelCase : Dict = loader_batch_size # Internal bookkeeping UpperCAmelCase : List[str] = None UpperCAmelCase : Optional[int] = None def __len__( self : Optional[int] ): return len(self.loader ) def __iter__( self : int ): UpperCAmelCase : Dict = iter(self.loader ) return self def __magic_name__ ( self : Dict ): if isinstance(self._loader_batch_data, torch.Tensor ): # Batch data is simple tensor, just fetch the slice UpperCAmelCase : Optional[int] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) UpperCAmelCase : Union[str, Any] = {} for k, element in self._loader_batch_data.items(): if isinstance(__A, __A ): # Convert ModelOutput to tuple first UpperCAmelCase : str = element.to_tuple() if isinstance(element[0], torch.Tensor ): UpperCAmelCase : Optional[int] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0], np.ndarray ): UpperCAmelCase : str = tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__A, __A ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0], torch.Tensor ): UpperCAmelCase : Optional[int] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0], np.ndarray ): UpperCAmelCase : Tuple = tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around UpperCAmelCase : Tuple = None elif isinstance(element[self._loader_batch_index], torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index], np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase : str = np.expand_dims(element[self._loader_batch_index], 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
UpperCAmelCase : int = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 UpperCAmelCase : Tuple = self._loader_batch_data.__class__(__A ) self._loader_batch_index += 1 return result def __magic_name__ ( self : List[Any] ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch UpperCAmelCase : List[Any] = next(self.iterator ) UpperCAmelCase : List[Any] = self.infer(__A, **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(__A, torch.Tensor ): UpperCAmelCase : Dict = processed else: UpperCAmelCase : Optional[Any] = list(processed.keys() )[0] UpperCAmelCase : Union[str, Any] = processed[key] if isinstance(__A, __A ): UpperCAmelCase : int = len(__A ) else: UpperCAmelCase : Tuple = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase : Union[str, Any] = observed_batch_size # Setting internal index to unwrap the batch UpperCAmelCase : Dict = processed UpperCAmelCase : str = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Optional[Any], __A : Optional[int], __A : str, __A : str, __A : Tuple=None ): super().__init__(__A, __A, __A ) def __iter__( self : List[str] ): UpperCAmelCase : List[str] = iter(self.loader ) UpperCAmelCase : int = None return self def __magic_name__ ( self : Tuple ): if self.subiterator is None: UpperCAmelCase : Dict = self.infer(next(self.iterator ), **self.params ) try: # Try to return next item UpperCAmelCase : Tuple = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators UpperCAmelCase : Dict = self.infer(next(self.iterator ), **self.params ) UpperCAmelCase : Tuple = next(self.subiterator ) return processed class __UpperCAmelCase ( lowerCamelCase__ ): def __iter__( self : Union[str, Any] ): UpperCAmelCase : Union[str, Any] = iter(self.loader ) return self def __magic_name__ ( self : int ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
UpperCAmelCase : Tuple = False UpperCAmelCase : Union[str, Any] = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase : Optional[Any] = self.loader_batch_item() UpperCAmelCase : Dict = item.pop('''is_last''' ) accumulator.append(__A ) if is_last: return accumulator while not is_last: UpperCAmelCase : List[Any] = self.infer(next(self.iterator ), **self.params ) if self.loader_batch_size is not None: if isinstance(__A, torch.Tensor ): UpperCAmelCase : int = processed else: UpperCAmelCase : List[str] = list(processed.keys() )[0] UpperCAmelCase : Optional[int] = processed[key] if isinstance(__A, __A ): UpperCAmelCase : Optional[int] = len(__A ) else: UpperCAmelCase : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase : Optional[Any] = observed_batch_size UpperCAmelCase : Optional[Any] = processed UpperCAmelCase : Any = 0 while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase : List[Any] = self.loader_batch_item() UpperCAmelCase : Dict = item.pop('''is_last''' ) accumulator.append(__A ) if is_last: return accumulator else: UpperCAmelCase : Dict = processed UpperCAmelCase : Optional[Any] = item.pop('''is_last''' ) accumulator.append(__A ) return accumulator class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : List[Any], __A : Dataset, __A : str ): UpperCAmelCase : Optional[Any] = dataset UpperCAmelCase : List[Any] = key def __len__( self : str ): return len(self.dataset ) def __getitem__( self : Any, __A : List[str] ): return self.dataset[i][self.key] class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Optional[Any], __A : Dataset, __A : str, __A : str ): UpperCAmelCase : int = dataset UpperCAmelCase : Optional[Any] = keya UpperCAmelCase : Optional[Any] = keya def __len__( self : List[Any] ): return len(self.dataset ) def __getitem__( self : Optional[int], __A : int ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
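# Flow sketch (all names illustrative): the classes above chain preprocess ->
# forward -> postprocess while hiding batching from the per-item steps.
#
#     dataset = PipelineDataset(raw_items, preprocess, preprocess_params)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#     iterator = PipelineIterator(loader, forward, forward_params, loader_batch_size=8)
#     for model_output in iterator:        # yields one un-batched item at a time
#         result = postprocess(model_output)
#
# PipelineIterator slices each batched output back into batch-size-1 views
# (loader_batch_item), so postprocess sees the same shapes it would have seen
# with batch_size == 1.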
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any: UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else '''''' UpperCAmelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''), (f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''), (f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''), (f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any: for i in range(config.num_hidden_layers ): UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else '''''' # queries, keys and values UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) UpperCAmelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase : str = q_bias UpperCAmelCase : List[str] = in_proj_weight[ 
config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase : int = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) UpperCAmelCase : str = gamma_a UpperCAmelCase : Dict = gamma_a def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]: UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase ) UpperCAmelCase : str = val def a__ ( ) -> Optional[int]: UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]: UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCAmelCase : List[Any] = 1_024 UpperCAmelCase : Optional[Any] = 4_096 UpperCAmelCase : Any = 24 UpperCAmelCase : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: UpperCAmelCase : Optional[Any] = 16 UpperCAmelCase : List[Any] = '''huggingface/label-files''' UpperCAmelCase : Any = '''rvlcdip-id2label.json''' UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : Union[str, Any] = idalabel UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model'''] UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase ) for src, dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase ) # load HuggingFace model UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase ) model.eval() model.load_state_dict(UpperCAmelCase ) # Check outputs on an image UpperCAmelCase : Dict = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase ) UpperCAmelCase : List[str] = prepare_img() UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' ) UpperCAmelCase : str = encoding['''pixel_values'''] UpperCAmelCase : Any = model(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = outputs.logits # verify logits UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192] assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected" Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if 
push_to_hub: if has_lm_head: UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , ) if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
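# Invocation sketch. The file name below is an assumption; the flags are the
# ones declared in the argument parser above:
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base \
#       --push_to_hub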
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = LEDTokenizer UpperCamelCase = LEDTokenizerFast UpperCamelCase = True def __magic_name__ ( self : int ): super().setUp() UpperCAmelCase : List[str] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] UpperCAmelCase : Tuple = dict(zip(__A, range(len(__A ) ) ) ) UpperCAmelCase : Optional[int] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase : Any = {'''unk_token''': '''<unk>'''} UpperCAmelCase : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __magic_name__ ( self : Any, **__A : List[Any] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **__A ) def __magic_name__ ( self : Tuple, **__A : Dict ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **__A ) def __magic_name__ ( self : int, __A : Tuple ): return "lower newer", "lower newer" @cached_property def __magic_name__ ( self : List[Any] ): return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def __magic_name__ ( self : Any ): return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase : int = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : List[str] = tokenizer(__A, max_length=len(__A ), padding=__A, return_tensors='''pt''' ) self.assertIsInstance(__A, __A ) self.assertEqual((2, 9), batch.input_ids.shape ) self.assertEqual((2, 9), batch.attention_mask.shape ) UpperCAmelCase : Any = batch.input_ids.tolist()[0] self.assertListEqual(__A, __A ) @require_torch def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : List[str] = tokenizer(__A, padding=__A, return_tensors='''pt''' ) self.assertIn('''input_ids''', __A ) self.assertIn('''attention_mask''', __A ) self.assertNotIn('''labels''', __A ) self.assertNotIn('''decoder_attention_mask''', __A ) @require_torch def __magic_name__ ( self : int ): UpperCAmelCase : Optional[int] = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : List[Any] = 
tokenizer(text_target=__A, max_length=3_2, padding='''max_length''', return_tensors='''pt''' ) self.assertEqual(3_2, targets['''input_ids'''].shape[1] ) @require_torch def __magic_name__ ( self : Optional[int] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : List[Any] = tokenizer( ['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''], padding=__A, truncation=__A, return_tensors='''pt''' ) self.assertIsInstance(__A, __A ) self.assertEqual(batch.input_ids.shape, (2, 5_1_2_2) ) @require_torch def __magic_name__ ( self : int ): UpperCAmelCase : Any = ['''A long paragraph for summarization.'''] UpperCAmelCase : str = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : Dict = tokenizer(__A, return_tensors='''pt''' ) UpperCAmelCase : str = tokenizer(text_target=__A, return_tensors='''pt''' ) UpperCAmelCase : Optional[Any] = inputs['''input_ids'''] UpperCAmelCase : Optional[Any] = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __magic_name__ ( self : int ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : str = ['''Summary of the text.''', '''Another summary.'''] UpperCAmelCase : Optional[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] UpperCAmelCase : Union[str, Any] = tokenizer(__A, padding=__A ) UpperCAmelCase : Dict = [[0] * len(__A ) for x in encoded_output['''input_ids''']] UpperCAmelCase : List[Any] = tokenizer.pad(__A ) self.assertSequenceEqual(outputs['''global_attention_mask'''], __A ) def __magic_name__ ( self : str ): pass def __magic_name__ ( self : List[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(__A, **__A ) UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(__A, **__A ) UpperCAmelCase : List[Any] = '''A, <mask> AllenNLP sentence.''' UpperCAmelCase : List[str] = tokenizer_r.encode_plus(__A, add_special_tokens=__A, return_token_type_ids=__A ) UpperCAmelCase : Union[str, Any] = tokenizer_p.encode_plus(__A, add_special_tokens=__A, return_token_type_ids=__A ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ), sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ), sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ), ) UpperCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) UpperCAmelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''], [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''], [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( __A, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __A, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
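# Worked example for the toy vocabulary above (a sketch; "\u0120" is the
# byte-level marker for a leading space). Tokenizing " lower" applies the
# merges in priority order:
#     \u0120 l o w e r
#  -> \u0120l o w e r      (merge "\u0120 l")
#  -> \u0120lo w e r       (merge "\u0120l o")
#  -> \u0120low e r        (merge "\u0120lo w")
#  -> \u0120low er         (merge "e r")
# so " lower" ends up as the two tokens ["\u0120low", "er"].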
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __UpperCAmelCase ( unittest.TestCase ): def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ): UpperCAmelCase : Any = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : Any = seq_length UpperCAmelCase : Tuple = is_training UpperCAmelCase : str = use_attention_mask UpperCAmelCase : List[str] = use_token_type_ids UpperCAmelCase : int = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : str = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Optional[Any] = initializer_range UpperCAmelCase : Any = num_choices def __magic_name__ ( self : str ): UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : List[Any] = None if self.use_attention_mask: UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Any = None if self.use_token_type_ids: UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) UpperCAmelCase : Union[str, Any] = RobertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def __magic_name__ ( self : int ): UpperCAmelCase : Any = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs UpperCAmelCase : Any = True UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 
self.seq_length, self.hidden_size] ) UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Dict = FlaxRobertaModelTester(self ) @slow def __magic_name__ ( self : Any ): for model_class_name in self.all_model_classes: UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A ) UpperCAmelCase : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
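# Usage sketch mirroring the slow test above (network access assumed; from_pt
# converts the PyTorch checkpoint to Flax on the fly):
#
#     model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
#     outputs = model(np.ones((1, 1), dtype="i4"))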
"""
Random graph generator using an adjacency-list representation.
URL: https://en.wikipedia.org/wiki/Random_graph
"""
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """
    Generate a random graph with `vertices_number` vertices, where each
    possible edge (u, v) exists with the given probability. If `directed`
    is False, every edge is mirrored so the adjacency list stays symmetric.
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j]
        for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
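# Usage sketch: seeding makes the output reproducible, and with directed=False
# the adjacency list is symmetric by construction:
#
#     random.seed(1)
#     g = random_graph(4, 0.5)
#     assert all(u in g[v] for u in g for v in g[u])  # every edge is mirrored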
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Dict = {"vocab_file": "vocab.txt"} _lowerCamelCase : List[str] = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } _lowerCamelCase : List[Any] = { "facebook/esm2_t6_8M_UR50D": 1_0_2_4, "facebook/esm2_t12_35M_UR50D": 1_0_2_4, } def a__ ( UpperCAmelCase : List[str] ) -> Any: with open(UpperCAmelCase , '''r''' ) as f: UpperCAmelCase : Dict = f.read().splitlines() return [l.strip() for l in lines] class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ): super().__init__(**__A ) UpperCAmelCase : Tuple = load_vocab_file(__A ) UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) ) UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )} UpperCAmelCase : Union[str, Any] = unk_token UpperCAmelCase : Optional[Any] = cls_token UpperCAmelCase : Optional[int] = pad_token UpperCAmelCase : Optional[int] = mask_token UpperCAmelCase : List[str] = eos_token UpperCAmelCase : Optional[Any] = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __magic_name__ ( self : Tuple, __A : int ): return self._id_to_token.get(__A, self.unk_token ) def __magic_name__ ( self : List[Any], __A : str ): return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) ) def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ): return text.split() def __magic_name__ ( self : Optional[int], __A : Dict=False ): return len(self._id_to_token ) def __magic_name__ ( self : int ): return {token: i for i, token in enumerate(self.all_tokens )} def __magic_name__ ( self : Tuple, __A : str ): return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) ) def __magic_name__ ( self : Any, __A : int ): return self._id_to_token.get(__A, self.unk_token ) def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ): UpperCAmelCase : Optional[int] = [self.cls_token_id] UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1] if token_ids_a is 
not None: mask += [0] * len(__A ) + [1] return mask def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ): UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(__A, '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __magic_name__ ( self : Dict ): return self.get_vocab_size(with_added_tokens=__A ) def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ): return super()._add_tokens(__A, special_tokens=__A )
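# Usage sketch; the concrete class shipped by transformers for this vocabulary
# is EsmTokenizer (assumed here). Tokenization is per amino acid, so a
# 6-residue sequence encodes to 8 ids including <cls> and <eos>:
#
#     tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     enc = tok("MKTVRQ")
#     # <cls> M K T V R Q <eos>  ->  len(enc["input_ids"]) == 8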
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __UpperCAmelCase ( lowerCamelCase__ ): def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) UpperCAmelCase : str = '''__cached_''' + self.fget.__name__ UpperCAmelCase : int = getattr(__A, __A, __A ) if cached is None: UpperCAmelCase : Any = self.fget(__A ) setattr(__A, __A, __A ) return cached def a__ ( UpperCAmelCase : Optional[Any] ) -> Any: UpperCAmelCase : Any = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_torch_fx_proxy(UpperCAmelCase ): return True if is_torch_available(): import torch if isinstance(UpperCAmelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCAmelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]: return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : str ) -> Tuple: return _is_numpy(UpperCAmelCase ) def a__ ( UpperCAmelCase : str ) -> List[Any]: import torch return isinstance(UpperCAmelCase , torch.Tensor ) def a__ ( UpperCAmelCase : str ) -> List[Any]: return False if not is_torch_available() else _is_torch(UpperCAmelCase ) def a__ ( UpperCAmelCase : Tuple ) -> List[str]: import torch return isinstance(UpperCAmelCase , torch.device ) def a__ ( UpperCAmelCase : Any ) -> Any: return False if not is_torch_available() else _is_torch_device(UpperCAmelCase ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: import torch if isinstance(UpperCAmelCase , UpperCAmelCase ): if hasattr(UpperCAmelCase , UpperCAmelCase ): UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase ) else: return False return isinstance(UpperCAmelCase , torch.dtype ) def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase ) def a__ ( UpperCAmelCase : Any ) -> str: import tensorflow as tf return isinstance(UpperCAmelCase , tf.Tensor ) def a__ ( UpperCAmelCase : int ) -> Union[str, Any]: return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[str] ) -> Tuple: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(UpperCAmelCase ) return type(UpperCAmelCase ) == tf.Tensor def a__ ( UpperCAmelCase : int ) -> List[Any]: return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[Any] ) -> Dict: import jax.numpy as jnp # noqa: F811 return isinstance(UpperCAmelCase , jnp.ndarray ) 
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]: return False if not is_flax_available() else _is_jax(UpperCAmelCase ) def a__ ( UpperCAmelCase : int ) -> Tuple: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return [to_py_obj(UpperCAmelCase ) for o in obj] elif is_tf_tensor(UpperCAmelCase ): return obj.numpy().tolist() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ).tolist() elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( UpperCAmelCase : Any ) -> List[str]: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return np.array(UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): return obj.numpy() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ) else: return obj class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Optional[Any] = fields(self ) # Safety and consistency checks if not len(__A ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) UpperCAmelCase : int = getattr(self, class_fields[0].name ) UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__A ): if isinstance(__A, __A ): UpperCAmelCase : Tuple = first_field.items() UpperCAmelCase : Any = True else: try: UpperCAmelCase : Optional[Any] = iter(__A ) UpperCAmelCase : Optional[Any] = True except TypeError: UpperCAmelCase : Optional[int] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__A ): if ( not isinstance(__A, (list, tuple) ) or not len(__A ) == 2 or not isinstance(element[0], __A ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self, element[0], element[1] ) if element[1] is not None: UpperCAmelCase : Union[str, Any] = element[1] elif first_field is not None: UpperCAmelCase : Union[str, Any] = first_field else: for field in class_fields: UpperCAmelCase : Optional[Any] = getattr(self, field.name ) if v is not None: UpperCAmelCase : Optional[int] = v def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ): raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ): raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : Any, *__A : Dict, **__A : str ): raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ): raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self : List[str], __A : List[str] ): if isinstance(__A, __A ): UpperCAmelCase : int = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__A, __A ) super().__setattr__(__A, __A ) def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ): # Will raise a KeyException if needed super().__setitem__(__A, __A ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__A, __A ) def __magic_name__ ( self : List[str] ): return tuple(self[k] for k in self.keys() ) class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): @classmethod def __magic_name__ ( cls : List[Any], __A : Tuple ): raise ValueError( F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """longest""" UpperCamelCase = """max_length""" UpperCamelCase = """do_not_pad""" class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """pt""" UpperCamelCase = """tf""" UpperCamelCase = """np""" UpperCamelCase = """jax""" class __UpperCAmelCase : def __init__( self : Any, __A : List[ContextManager] ): UpperCAmelCase : Tuple = context_managers UpperCAmelCase : Tuple = ExitStack() def __enter__( self : Any ): for context_manager in self.context_managers: self.stack.enter_context(__A ) def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ): self.stack.__exit__(*__A, **__A ) def a__ ( UpperCAmelCase : Union[str, Any] ) -> str: UpperCAmelCase : int = infer_framework(UpperCAmelCase ) if framework == "tf": UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ ( UpperCAmelCase : Dict ) -> Any: UpperCAmelCase : List[Any] = model_class.__name__ UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase ) if framework == "tf": UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase : Dict = 
inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]: def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ): for k, v in d.items(): UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k if v and isinstance(UpperCAmelCase , UpperCAmelCase ): yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items() else: yield key, v return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) @contextmanager def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]: if is_numpy_array(UpperCAmelCase ): return np.transpose(UpperCAmelCase , axes=UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.T if axes is None else array.permute(*UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]: if is_numpy_array(UpperCAmelCase ): return np.reshape(UpperCAmelCase , UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.reshape(*UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.reshape(UpperCAmelCase , UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.reshape(UpperCAmelCase , UpperCAmelCase ) else: raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any: if is_numpy_array(UpperCAmelCase ): return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str: if is_numpy_array(UpperCAmelCase ): return np.expand_dims(UpperCAmelCase , UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.unsqueeze(dim=UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_numpy_array(UpperCAmelCase ): return 
np.size(UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.numel() elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.size(UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return array.size else: raise ValueError(f'''Type not supported for tensor size: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict: for key, value in auto_map.items(): if isinstance(UpperCAmelCase , (tuple, list) ): UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase : List[Any] = f'''{repo_id}--{value}''' return auto_map def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]: for base_class in inspect.getmro(UpperCAmelCase ): UpperCAmelCase : Any = base_class.__module__ UpperCAmelCase : Dict = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'''Could not infer framework from class {model_class}.''' )
336
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) ) self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) ) class __UpperCAmelCase : def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ): UpperCAmelCase : Optional[int] = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : List[str] = num_channels UpperCAmelCase : str = image_size UpperCAmelCase : Optional[int] = depth_multiplier UpperCAmelCase : Union[str, Any] = depth_divisible_by UpperCAmelCase : Optional[Any] = min_depth UpperCAmelCase : List[str] = expand_ratio UpperCAmelCase : Dict = tf_padding UpperCAmelCase : str = output_stride UpperCAmelCase : Union[str, Any] = first_layer_is_expansion UpperCAmelCase : List[Any] = finegrained_output UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) UpperCAmelCase : Optional[Any] = classifier_dropout_prob UpperCAmelCase : Dict = use_labels UpperCAmelCase : List[str] = is_training UpperCAmelCase : Tuple = num_labels UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : Any = scope def __magic_name__ ( self : List[Any] ): UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Dict = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels ) UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__ ( self : Any ): return MobileNetVaConfig( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def __magic_name__ ( self : List[Any], __A : Dict, __A : 
Optional[Any], __A : Optional[int], __A : Union[str, Any] ): UpperCAmelCase : Any = MobileNetVaModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Optional[Any] = model(__A ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) self.parent.assertEqual( result.pooler_output.shape, (self.batch_size, self.last_hidden_size), ) def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ): UpperCAmelCase : Optional[int] = self.num_labels UpperCAmelCase : Any = MobileNetVaForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Optional[int] = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ): UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCAmelCase : Optional[Any] = model(__A, labels=__A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) UpperCamelCase = ( { """feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification, """image-segmentation""": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = MobileNetVaModelTester(self ) UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A ) def __magic_name__ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def __magic_name__ ( self : Tuple ): pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def __magic_name__ ( self : Any ): pass def __magic_name__ ( self : Optional[int] ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(__A ) UpperCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def 
__magic_name__ ( self : List[Any] ): UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : int ): def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ): UpperCAmelCase : Union[str, Any] = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Optional[Any] = outputs.hidden_states UpperCAmelCase : List[Any] = 1_6 self.assertEqual(len(__A ), __A ) UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Tuple = True check_hidden_states_output(__A, __A, __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Tuple = True check_hidden_states_output(__A, __A, __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : int ): UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__A ) @slow def __magic_name__ ( self : Dict ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> int: UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[Any] ): return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A ) UpperCAmelCase : Optional[int] = self.default_image_processor UpperCAmelCase : Optional[Any] = prepare_img() UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : str = model(**__A ) # verify the logits UpperCAmelCase : int = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) UpperCAmelCase : List[Any] = model.to(__A ) UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) UpperCAmelCase : List[Any] = prepare_img() UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Union[str, Any] = model(**__A ) UpperCAmelCase : Optional[Any] = outputs.logits # verify the logits UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) ) self.assertEqual(logits.shape, __A ) UpperCAmelCase : Tuple = torch.tensor( [ [[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]], [[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, 
-2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]], [[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]], ], device=__A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
336
1
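The row above dispatches on tensor type to convert arbitrarily nested containers to plain Python objects. Below is a minimal runnable sketch of that same recursion, restricted to NumPy so it works without TensorFlow, PyTorch, or JAX installed; the helper name and dispatch order mirror the pattern, not the exact library internals.

# Framework-agnostic "to python object" conversion, NumPy-only sketch.
from collections import UserDict
import numpy as np

def to_py_obj(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    if isinstance(obj, (np.ndarray, np.number)):
        return obj.tolist()  # tolist() also handles 0-d arrays and scalars
    return obj

print(to_py_obj({"a": np.arange(3), "b": [np.float32(1.5), (2, 3)]}))
# -> {'a': [0, 1, 2], 'b': [1.5, [2, 3]]}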
from __future__ import annotations _lowerCamelCase : Dict = [True] * 1_0_0_0_0_0_1 _lowerCamelCase : List[Any] = 2 while i * i <= 1_0_0_0_0_0_0: if sieve[i]: for j in range(i * i, 1_0_0_0_0_0_1, i): _lowerCamelCase : List[Any] = False i += 1 def a__ ( UpperCAmelCase : int ) -> bool: return sieve[n] def a__ ( UpperCAmelCase : int ) -> bool: return any(digit in '''02468''' for digit in str(UpperCAmelCase ) ) def a__ ( UpperCAmelCase : int = 1_000_000 ) -> list[int]: UpperCAmelCase : List[Any] = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(UpperCAmelCase ) and not contains_an_even_digit(UpperCAmelCase ): UpperCAmelCase : Any = str(UpperCAmelCase ) UpperCAmelCase : str = [int(str_num[j:] + str_num[:j] ) for j in range(len(UpperCAmelCase ) )] if all(is_prime(UpperCAmelCase ) for i in list_nums ): result.append(UpperCAmelCase ) return result def a__ ( ) -> int: return len(find_circular_primes() ) if __name__ == "__main__": print(f"""{len(find_circular_primes()) = }""")
336
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """codegen""" UpperCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ): UpperCAmelCase : int = vocab_size UpperCAmelCase : Tuple = n_ctx UpperCAmelCase : Tuple = n_positions UpperCAmelCase : Optional[int] = n_embd UpperCAmelCase : Union[str, Any] = n_layer UpperCAmelCase : List[str] = n_head UpperCAmelCase : Tuple = n_inner UpperCAmelCase : int = rotary_dim UpperCAmelCase : List[Any] = activation_function UpperCAmelCase : List[str] = resid_pdrop UpperCAmelCase : Optional[Any] = embd_pdrop UpperCAmelCase : str = attn_pdrop UpperCAmelCase : Tuple = layer_norm_epsilon UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Union[str, Any] = use_cache UpperCAmelCase : Any = bos_token_id UpperCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ): super().__init__(__A, task=__A, patching_specs=__A, use_past=__A ) if not getattr(self._config, '''pad_token_id''', __A ): # TODO: how to do that better? 
UpperCAmelCase : Union[str, Any] = 0 @property def __magic_name__ ( self : str ): UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__A, direction='''inputs''' ) UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __magic_name__ ( self : Dict ): return self._config.n_layer @property def __magic_name__ ( self : List[str] ): return self._config.n_head def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs( __A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A ) # We need to order the input in the way they appears in the forward() UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase : str = seqlen + 2 UpperCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase : Optional[int] = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask'''] if self.use_past: UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype UpperCAmelCase : Dict = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 ) return ordered_inputs @property def __magic_name__ ( self : Tuple ): return 1_3
336
1
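The circular-primes solver earlier in this row rotates every digit string and checks that each rotation is prime. A self-contained sketch of that test, using trial division instead of the precomputed sieve so it runs standalone:

# Circular-prime check via digit rotations; trial division keeps it dependency-free.
def is_prime(n: int) -> bool:
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

def is_circular_prime(n: int) -> bool:
    s = str(n)
    # every rotation s[k:] + s[:k] must itself be prime
    return all(is_prime(int(s[k:] + s[:k])) for k in range(len(s)))

print([n for n in range(2, 100) if is_circular_prime(n)])
# -> [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]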
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bs4 import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": _lowerCamelCase : Optional[Any] = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") _lowerCamelCase : Any = f"""https://www.google.com/search?q={query}&num=100""" _lowerCamelCase : List[Any] = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: _lowerCamelCase : List[Any] = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: _lowerCamelCase : Union[str, Any] = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
336
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( "pipelines_utils", "0.22.0", "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.", standard_warn=False, stacklevel=3, )
336
1
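The search script in this row fetches a Google results page and pulls the first link out of a named div. A hedged sketch of that scrape-first-result pattern, assuming requests and beautifulsoup4 are installed; the CSS class "yuRUbf" is copied from the snippet and changes frequently on Google's side, so treat it as illustrative rather than stable.

# Fetch a results page and return the first organic link, or None if the
# expected markup is absent (Google's class names are not a stable API).
from typing import Optional
import requests
from bs4 import BeautifulSoup

def first_result_link(query: str) -> Optional[str]:
    res = requests.get(
        "https://www.google.com/search",
        params={"q": query, "num": 100},
        headers={"User-Agent": "Mozilla/5.0"},
        timeout=10,
    )
    div = BeautifulSoup(res.text, "html.parser").find("div", attrs={"class": "yuRUbf"})
    anchor = div.find("a") if div else None
    return anchor.get("href") if anchor else None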
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL _lowerCamelCase : int = logging.get_logger(__name__) def a__ ( UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, Iterable[int]] , UpperCAmelCase : bool , UpperCAmelCase : int ) -> Tuple[int, int]: def constraint_to_multiple_of(UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : str=0 , UpperCAmelCase : Any=None ): UpperCAmelCase : List[str] = round(val / multiple ) * multiple if max_val is not None and x > max_val: UpperCAmelCase : List[Any] = math.floor(val / multiple ) * multiple if x < min_val: UpperCAmelCase : str = math.ceil(val / multiple ) * multiple return x UpperCAmelCase : Tuple = (output_size, output_size) if isinstance(UpperCAmelCase , UpperCAmelCase ) else output_size UpperCAmelCase , UpperCAmelCase : Union[str, Any] = get_image_size(UpperCAmelCase ) UpperCAmelCase , UpperCAmelCase : int = output_size # determine new height and width UpperCAmelCase : int = output_height / input_height UpperCAmelCase : Optional[int] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width UpperCAmelCase : Optional[int] = scale_width else: # fit height UpperCAmelCase : Any = scale_height UpperCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_height * input_height , multiple=UpperCAmelCase ) UpperCAmelCase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=UpperCAmelCase ) return (new_height, new_width) class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = ["""pixel_values"""] def __init__( self : Tuple, __A : bool = True, __A : Dict[str, int] = None, __A : PILImageResampling = PILImageResampling.BILINEAR, __A : bool = False, __A : int = 1, __A : bool = True, __A : Union[int, float] = 1 / 2_5_5, __A : bool = True, __A : Optional[Union[float, List[float]]] = None, __A : Optional[Union[float, List[float]]] = None, **__A : Dict, ): super().__init__(**__A ) UpperCAmelCase : Dict = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4} UpperCAmelCase : Optional[int] = get_size_dict(__A ) UpperCAmelCase : Union[str, Any] = do_resize UpperCAmelCase : Optional[int] = size UpperCAmelCase : Tuple = keep_aspect_ratio UpperCAmelCase : int = ensure_multiple_of UpperCAmelCase : int = resample UpperCAmelCase : Dict = do_rescale UpperCAmelCase : Any = rescale_factor UpperCAmelCase : Optional[Any] = do_normalize UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def __magic_name__ ( self : Optional[int], __A : np.ndarray, __A : Dict[str, int], __A : bool = False, __A : int = 1, __A : PILImageResampling = PILImageResampling.BICUBIC, __A : Optional[Union[str, ChannelDimension]] = None, **__A : Union[str, Any], ): UpperCAmelCase : Tuple = get_size_dict(__A ) if "height" not in size or 
"width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) UpperCAmelCase : Optional[Any] = get_resize_output_image_size( __A, output_size=(size['''height'''], size['''width''']), keep_aspect_ratio=__A, multiple=__A, ) return resize(__A, size=__A, resample=__A, data_format=__A, **__A ) def __magic_name__ ( self : int, __A : np.ndarray, __A : Union[int, float], __A : Optional[Union[str, ChannelDimension]] = None, **__A : Tuple, ): return rescale(__A, scale=__A, data_format=__A, **__A ) def __magic_name__ ( self : Dict, __A : np.ndarray, __A : Union[float, List[float]], __A : Union[float, List[float]], __A : Optional[Union[str, ChannelDimension]] = None, **__A : Union[str, Any], ): return normalize(__A, mean=__A, std=__A, data_format=__A, **__A ) def __magic_name__ ( self : List[Any], __A : ImageInput, __A : bool = None, __A : int = None, __A : bool = None, __A : int = None, __A : PILImageResampling = None, __A : bool = None, __A : float = None, __A : bool = None, __A : Optional[Union[float, List[float]]] = None, __A : Optional[Union[float, List[float]]] = None, __A : Optional[Union[str, TensorType]] = None, __A : ChannelDimension = ChannelDimension.FIRST, **__A : Optional[int], ): UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize UpperCAmelCase : int = size if size is not None else self.size UpperCAmelCase : int = get_size_dict(__A ) UpperCAmelCase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio UpperCAmelCase : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of UpperCAmelCase : List[str] = resample if resample is not None else self.resample UpperCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean UpperCAmelCase : str = image_std if image_std is not None else self.image_std UpperCAmelCase : List[Any] = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase : str = [to_numpy_array(__A ) for image in images] if do_resize: UpperCAmelCase : Any = [self.resize(image=__A, size=__A, resample=__A ) for image in images] if do_rescale: UpperCAmelCase : Tuple = [self.rescale(image=__A, scale=__A ) for image in images] if do_normalize: UpperCAmelCase : Tuple = [self.normalize(image=__A, mean=__A, std=__A ) for image in images] UpperCAmelCase : Optional[int] = [to_channel_dimension_format(__A, __A ) for image in images] UpperCAmelCase : Tuple = {'''pixel_values''': images} return BatchFeature(data=__A, tensor_type=__A ) def __magic_name__ ( self : str, __A : Optional[Any], __A : List[Tuple] = None ): UpperCAmelCase : str = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__A ) != len(__A ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(__A ): UpperCAmelCase : Union[str, Any] = target_sizes.numpy() UpperCAmelCase : Dict = [] for idx in range(len(__A ) ): UpperCAmelCase : Any = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode='''bilinear''', align_corners=__A ) UpperCAmelCase : Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__A ) else: UpperCAmelCase : Dict = logits.argmax(dim=1 ) UpperCAmelCase : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
336
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class __UpperCAmelCase : # setable values UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None # sigma(t_i) @classmethod def __magic_name__ ( cls : Any ): return cls() @dataclass class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): @property def __magic_name__ ( self : Optional[int] ): return True @register_to_config def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ): pass def __magic_name__ ( self : Optional[Any] ): return KarrasVeSchedulerState.create() def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ): UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy() UpperCAmelCase : Union[str, Any] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.float32 ), timesteps=__A, ) def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ): if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 ) else: UpperCAmelCase : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 ) UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape ) UpperCAmelCase : Tuple = sigma + gamma * sigma UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ): UpperCAmelCase : int = sample_hat + sigma_hat * model_output UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A ) def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ): UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A ) def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ): raise NotImplementedError()
336
1
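The image processor earlier in this row resizes while keeping the aspect ratio and constraining both sides to a multiple. A minimal re-derivation of that logic in plain Python, simplified by dropping the max_val clamp from the original constraint helper:

# Pick the scale factor closest to 1 ("scale as little as possible"),
# then snap each side to the nearest multiple, rounding up if it would
# otherwise fall below min_val.
import math

def constrain_to_multiple_of(val: float, multiple: int, min_val: int = 0) -> int:
    x = round(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

def resize_output_size(in_h, in_w, out_h, out_w, multiple=32):
    scale_h, scale_w = out_h / in_h, out_w / in_w
    scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
    return (constrain_to_multiple_of(scale * in_h, multiple),
            constrain_to_multiple_of(scale * in_w, multiple))

print(resize_output_size(480, 640, 384, 384))  # -> (384, 512)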
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _lowerCamelCase : Any = logging.get_logger(__name__) _lowerCamelCase : Tuple = "▁" _lowerCamelCase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"} _lowerCamelCase : str = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model" ), } } _lowerCamelCase : Optional[int] = { "facebook/nllb-200-distilled-600M": 1_0_2_4, } # fmt: off _lowerCamelCase : Dict = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = ["""input_ids""", """attention_mask"""] UpperCamelCase = [] UpperCamelCase = [] def __init__( self : List[str], __A : Optional[int], __A : List[str]="<s>", __A : Union[str, Any]="</s>", __A : Union[str, Any]="</s>", __A : str="<s>", __A : 
int="<unk>", __A : List[str]="<pad>", __A : Optional[Any]="<mask>", __A : Optional[int]=None, __A : Optional[Any]=None, __A : Union[str, Any]=None, __A : Optional[Dict[str, Any]] = None, __A : Dict=None, __A : Union[str, Any]=False, **__A : List[Any], ): # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase : Optional[Any] = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else mask_token UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs UpperCAmelCase : Optional[Any] = legacy_behaviour super().__init__( bos_token=__A, eos_token=__A, unk_token=__A, sep_token=__A, cls_token=__A, pad_token=__A, mask_token=__A, tokenizer_file=__A, src_lang=__A, tgt_lang=__A, additional_special_tokens=__A, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=__A, **__A, ) UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) UpperCAmelCase : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token UpperCAmelCase : Any = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCAmelCase : Optional[Any] = 1 UpperCAmelCase : str = len(self.sp_model ) UpperCAmelCase : Dict = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A ) } UpperCAmelCase : Tuple = {v: k for k, v in self.lang_code_to_id.items()} UpperCAmelCase : List[str] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} UpperCAmelCase : Union[str, Any] = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) UpperCAmelCase : str = src_lang if src_lang is not None else '''eng_Latn''' UpperCAmelCase : str = self.lang_code_to_id[self._src_lang] UpperCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Dict ): UpperCAmelCase : List[str] = self.__dict__.copy() UpperCAmelCase : List[Any] = None UpperCAmelCase : Tuple = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[Any], __A : str ): UpperCAmelCase : List[str] = d # for backward compatibility if not hasattr(self, '''sp_model_kwargs''' ): UpperCAmelCase : Any = {} UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def __magic_name__ ( self : Union[str, Any] ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __magic_name__ ( self : Optional[int] ): return self._src_lang @src_lang.setter def __magic_name__ ( self : Optional[Any], __A : str ): UpperCAmelCase : Optional[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __magic_name__ ( self : str, __A : List[int], __A : Optional[List[int]] = None, __A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A, token_ids_a=__A, already_has_special_tokens=__A ) UpperCAmelCase : Union[str, Any] = [1] * len(self.prefix_tokens ) UpperCAmelCase : Any = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__A )) + suffix_ones return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones def __magic_name__ ( self : int, __A : List[int], __A : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __magic_name__ ( self : int, __A : List[int], __A : Optional[List[int]] = None ): UpperCAmelCase : Dict = [self.sep_token_id] UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __magic_name__ ( self : Dict, __A : Tuple, __A : str, __A : Optional[str], __A : Optional[str], **__A : Dict ): if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCAmelCase : Any = src_lang UpperCAmelCase : Optional[int] = self(__A, add_special_tokens=__A, return_tensors=__A, **__A ) UpperCAmelCase : List[str] = self.convert_tokens_to_ids(__A ) UpperCAmelCase : str = tgt_lang_id return inputs def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Dict = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __magic_name__ ( self : List[Any], __A : str ): return self.sp_model.encode(__A, out_type=__A ) def __magic_name__ ( self : List[Any], __A : Tuple ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(__A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def 
__magic_name__ ( self : List[str], __A : Optional[Any] ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __magic_name__ ( self : str, __A : Any ): UpperCAmelCase : List[Any] = ''''''.join(__A ).replace(__A, ''' ''' ).strip() return out_string def __magic_name__ ( self : Any, __A : str, __A : Optional[str] = None ): if not os.path.isdir(__A ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase : List[Any] = os.path.join( __A, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, __A ) elif not os.path.isfile(self.vocab_file ): with open(__A, '''wb''' ) as fi: UpperCAmelCase : List[Any] = self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,) def __magic_name__ ( self : Union[str, Any], __A : List[str], __A : str = "eng_Latn", __A : Optional[List[str]] = None, __A : str = "fra_Latn", **__A : Any, ): UpperCAmelCase : List[Any] = src_lang UpperCAmelCase : Optional[Any] = tgt_lang return super().prepare_seqaseq_batch(__A, __A, **__A ) def __magic_name__ ( self : int ): return self.set_src_lang_special_tokens(self.src_lang ) def __magic_name__ ( self : List[str] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __magic_name__ ( self : Dict, __A : Union[str, Any] ): UpperCAmelCase : Dict = self.lang_code_to_id[src_lang] if self.legacy_behaviour: UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Tuple = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase : Dict = [self.cur_lang_code] UpperCAmelCase : Union[str, Any] = [self.eos_token_id] def __magic_name__ ( self : Tuple, __A : str ): UpperCAmelCase : List[str] = self.lang_code_to_id[lang] if self.legacy_behaviour: UpperCAmelCase : int = [] UpperCAmelCase : Any = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase : Optional[Any] = [self.cur_lang_code] UpperCAmelCase : List[str] = [self.eos_token_id]
336
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class __UpperCAmelCase ( ctypes.Structure ): # _fields is a specific attr expected by ctypes UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def a__ ( ) -> Dict: if os.name == "nt": UpperCAmelCase : List[str] = CursorInfo() UpperCAmelCase : List[Any] = ctypes.windll.kernel32.GetStdHandle(-11 ) ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) ) UpperCAmelCase : Dict = False ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def a__ ( ) -> Optional[int]: if os.name == "nt": UpperCAmelCase : int = CursorInfo() UpperCAmelCase : int = ctypes.windll.kernel32.GetStdHandle(-11 ) ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) ) UpperCAmelCase : Any = True ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def a__ ( ) -> Optional[Any]: try: hide_cursor() yield finally: show_cursor()
336
1
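The cursor helper at the end of this row wraps hide/show calls in a context manager so the cursor is restored even on error. A POSIX-only reduction of that pattern, omitting the Windows ctypes branch for brevity:

# Hide the terminal cursor for the duration of a block using ANSI escapes.
import sys
from contextlib import contextmanager

@contextmanager
def hidden_cursor():
    sys.stdout.write("\033[?25l")  # ANSI: hide cursor
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")  # ANSI: show cursor
        sys.stdout.flush()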
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class __UpperCAmelCase ( ctypes.Structure ): # _fields is a specific attr expected by ctypes UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def a__ ( ) -> Dict: if os.name == "nt": UpperCAmelCase : List[str] = CursorInfo() UpperCAmelCase : List[Any] = ctypes.windll.kernel32.GetStdHandle(-11 ) ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) ) UpperCAmelCase : Dict = False ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def a__ ( ) -> Optional[int]: if os.name == "nt": UpperCAmelCase : int = CursorInfo() UpperCAmelCase : int = ctypes.windll.kernel32.GetStdHandle(-11 ) ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) ) UpperCAmelCase : Any = True ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def a__ ( ) -> Optional[Any]: try: hide_cursor() yield finally: show_cursor()
336
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowerCamelCase : Tuple = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
336
1
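The package __init__ in this row defers heavy imports until an attribute is actually used. A stripped-down sketch of that lazy-import idea; the LazyModule class below is a hypothetical stand-in, not the transformers _LazyModule, and is demonstrated against the stdlib math module:

# Attribute access triggers the real import the first time it is needed,
# then the resolved object is cached on the module instance.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

math_lazy = LazyModule("math_lazy", {"math": ["sqrt", "pi"]})
print(math_lazy.sqrt(2), math_lazy.pi)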
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCamelCase : List[str] = logging.get_logger(__name__) _lowerCamelCase : Any = { "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json", } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """instructblip_vision_model""" def __init__( self : Union[str, Any], __A : List[Any]=1_4_0_8, __A : Any=6_1_4_4, __A : Tuple=3_9, __A : Any=1_6, __A : Optional[Any]=2_2_4, __A : List[Any]=1_4, __A : Dict="gelu", __A : int=1E-6, __A : Any=0.0, __A : Any=1E-10, __A : List[Any]=True, **__A : Optional[int], ): super().__init__(**__A ) UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : Any = num_attention_heads UpperCAmelCase : str = patch_size UpperCAmelCase : str = image_size UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : str = attention_dropout UpperCAmelCase : List[Any] = layer_norm_eps UpperCAmelCase : Any = hidden_act UpperCAmelCase : List[str] = qkv_bias @classmethod def __magic_name__ ( cls : Dict, __A : Union[str, os.PathLike], **__A : List[Any] ): cls._set_token_in_kwargs(__A ) UpperCAmelCase , UpperCAmelCase : Optional[int] = cls.get_config_dict(__A, **__A ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('''model_type''' ) == "instructblip": UpperCAmelCase : int = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """instructblip_qformer""" def __init__( self : Tuple, __A : str=3_0_5_2_2, __A : str=7_6_8, __A : List[Any]=1_2, __A : List[Any]=1_2, __A : List[Any]=3_0_7_2, __A : Any="gelu", __A : List[Any]=0.1, __A : Tuple=0.1, __A : Optional[Any]=5_1_2, __A : Optional[int]=0.0_2, __A : Tuple=1E-12, __A : Union[str, Any]=0, __A : List[str]="absolute", __A : Dict=2, __A : List[str]=1_4_0_8, **__A : Optional[Any], ): super().__init__(pad_token_id=__A, **__A ) UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : int = hidden_size UpperCAmelCase : List[str] = num_hidden_layers UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Dict = intermediate_size UpperCAmelCase : Any = hidden_dropout_prob UpperCAmelCase : Dict = attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] = max_position_embeddings UpperCAmelCase : Optional[int] = initializer_range UpperCAmelCase : List[Any] = layer_norm_eps UpperCAmelCase : Any = position_embedding_type UpperCAmelCase : Tuple = cross_attention_frequency UpperCAmelCase : List[str] = encoder_hidden_size @classmethod def __magic_name__ ( cls : Union[str, Any], __A : Union[str, os.PathLike], **__A : str ): cls._set_token_in_kwargs(__A ) UpperCAmelCase , UpperCAmelCase : Optional[int] = cls.get_config_dict(__A, **__A ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('''model_type''' ) == "instructblip": UpperCAmelCase : Any = config_dict['''qformer_config'''] if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """instructblip""" UpperCamelCase = True def __init__( self : List[Any], __A : Dict=None, __A : Dict=None, __A : Any=None, __A : Dict=3_2, **__A : Tuple ): super().__init__(**__A ) if vision_config is None: UpperCAmelCase : Any = {} logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' ) if qformer_config is None: UpperCAmelCase : Any = {} logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' ) if text_config is None: UpperCAmelCase : List[str] = {} logger.info('''text_config is None. 
Initializing the text config with default values (`OPTConfig`).''' ) UpperCAmelCase : Dict = InstructBlipVisionConfig(**__A ) UpperCAmelCase : List[str] = InstructBlipQFormerConfig(**__A ) UpperCAmelCase : List[str] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt''' UpperCAmelCase : List[str] = CONFIG_MAPPING[text_model_type](**__A ) UpperCAmelCase : Tuple = self.text_config.tie_word_embeddings UpperCAmelCase : Tuple = self.text_config.is_encoder_decoder UpperCAmelCase : List[Any] = num_query_tokens UpperCAmelCase : Any = self.vision_config.hidden_size UpperCAmelCase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES UpperCAmelCase : Any = 1.0 UpperCAmelCase : List[Any] = 0.0_2 @classmethod def __magic_name__ ( cls : List[str], __A : InstructBlipVisionConfig, __A : InstructBlipQFormerConfig, __A : PretrainedConfig, **__A : int, ): return cls( vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **__A, ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCAmelCase : Tuple = self.vision_config.to_dict() UpperCAmelCase : str = self.qformer_config.to_dict() UpperCAmelCase : Union[str, Any] = self.text_config.to_dict() UpperCAmelCase : Any = self.__class__.model_type return output
336
from __future__ import annotations def a__ ( UpperCAmelCase : int , UpperCAmelCase : int ) -> list[str]: if partitions <= 0: raise ValueError('''partitions must be a positive number!''' ) if partitions > number_of_bytes: raise ValueError('''partitions can not > number_of_bytes!''' ) UpperCAmelCase : str = number_of_bytes // partitions UpperCAmelCase : Dict = [] for i in range(UpperCAmelCase ): UpperCAmelCase : int = i * bytes_per_partition + 1 UpperCAmelCase : Optional[int] = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
336
1
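The allocation helper in this row splits a byte range into evenly sized partitions, with the last partition absorbing the remainder. A compact restatement with the same semantics:

# Split 1..number_of_bytes into `partitions` ranges; the final range runs
# to number_of_bytes so no bytes are lost to integer division.
def allocate(number_of_bytes: int, partitions: int) -> list:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    per = number_of_bytes // partitions
    return [
        f"{i * per + 1}-{number_of_bytes if i == partitions - 1 else (i + 1) * per}"
        for i in range(partitions)
    ]

print(allocate(100, 3))  # -> ['1-33', '34-66', '67-100']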
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( "stable diffusion controlnet", "0.22.0", "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.", standard_warn=False, stacklevel=3, )
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
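# Hedged smoke test: drive the parser/launcher above directly from Python. The
# TPU name, zone and echo command are illustrative values; `--debug` makes the
# launcher print the assembled gcloud command instead of executing it.
if __name__ == "__main__":
    demo_parser = tpu_command_parser()
    demo_args = demo_parser.parse_args(
        ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
    )
    tpu_command_launcher(demo_args)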
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: for each position i, the length of the
    longest proper prefix of input_string[: i + 1] that is also a suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest prefix of input_string that also occurs as a suffix."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
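# Worked example for the prefix function above ("aabcdaabc" is illustrative):
# pi = [0, 1, 0, 0, 0, 1, 2, 3, 4] -- at the last position the border "aabc"
# (a prefix that is also a suffix) has length 4.
if __name__ == "__main__":
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4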
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[int] = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: print('''Loading config file...''' ) def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ): UpperCAmelCase : List[str] = [] for k, v in d.items(): UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k if isinstance(UpperCAmelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() ) else: items.append((new_key, v) ) return dict(UpperCAmelCase ) UpperCAmelCase : List[str] = argparse.Namespace() with open(UpperCAmelCase , '''r''' ) as yaml_file: try: UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader ) UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase ) for k, v in flat_cfg.items(): setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) ) return config def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]: UpperCAmelCase : int = MobileViTVaConfig() UpperCAmelCase : str = False # dataset if task_name.startswith('''imagenet1k_''' ): UpperCAmelCase : Any = 1_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase : Any = 384 else: UpperCAmelCase : Tuple = 256 UpperCAmelCase : int = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): UpperCAmelCase : Optional[Any] = 21_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase : str = 384 else: UpperCAmelCase : Dict = 256 UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): UpperCAmelCase : Optional[Any] = 151 UpperCAmelCase : Tuple = 512 UpperCAmelCase : Tuple = '''ade20k-id2label.json''' UpperCAmelCase : Tuple = True elif task_name.startswith('''voc_''' ): UpperCAmelCase : Dict = 21 UpperCAmelCase : str = 512 UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json''' UpperCAmelCase : Dict = True # orig_config UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase ) assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) UpperCAmelCase : Any = getattr(UpperCAmelCase , 
'''model.segmentation.deeplabv3.aspp_out_channels''' , 512 ) UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label UpperCAmelCase : Union[str, Any] = '''huggingface/label-files''' UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : int = idalabel UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} return config def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]: UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase ) UpperCAmelCase : List[str] = val def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]: if base_model: UpperCAmelCase : Dict = '''''' else: UpperCAmelCase : Dict = '''mobilevitv2.''' UpperCAmelCase : Optional[int] = [] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCAmelCase : List[str] = k[8:] else: UpperCAmelCase : Dict = k if ".block." in k: UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: UpperCAmelCase : Dict = [0, 1] elif i == 4: UpperCAmelCase : Dict = [0, 1, 2, 3] elif i == 5: UpperCAmelCase : int = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: UpperCAmelCase : Optional[Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: UpperCAmelCase : Any = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." 
in k: UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." in k: UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any: UpperCAmelCase : str = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(UpperCAmelCase ) for k in keys_to_ignore: state_dict.pop(UpperCAmelCase , UpperCAmelCase ) def a__ ( ) -> Union[str, Any]: UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase ) # load original state_dict UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval() UpperCAmelCase : str = False else: UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval() UpperCAmelCase : Any = False # remove and rename some keys of load the original model UpperCAmelCase : Optional[Any] = checkpoint remove_unused_keys(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # load modified state_dict model.load_state_dict(UpperCAmelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase ) # verify classification model if task_name.startswith('''imagenet''' ): UpperCAmelCase : Optional[Any] = outputs.logits UpperCAmelCase : int = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": _lowerCamelCase : str = 
argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
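# Hedged usage note: the converter above is a command-line script. The script
# filename and all paths below are illustrative placeholders:
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf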
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
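# Hedged sketch of what the _LazyModule indirection above buys: importing the
# package is cheap, and heavy submodules (e.g. modeling_rag, which pulls in
# torch) are only imported on first attribute access.
import importlib

rag = importlib.import_module("transformers.models.rag")
print(type(rag).__name__)  # _LazyModule -- no submodule has been imported yet
tokenizer_cls = rag.RagTokenizer  # first access triggers the real import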
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __UpperCAmelCase ( lowerCamelCase__ ): def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) UpperCAmelCase : str = '''__cached_''' + self.fget.__name__ UpperCAmelCase : int = getattr(__A, __A, __A ) if cached is None: UpperCAmelCase : Any = self.fget(__A ) setattr(__A, __A, __A ) return cached def a__ ( UpperCAmelCase : Optional[Any] ) -> Any: UpperCAmelCase : Any = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_torch_fx_proxy(UpperCAmelCase ): return True if is_torch_available(): import torch if isinstance(UpperCAmelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCAmelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]: return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : str ) -> Tuple: return _is_numpy(UpperCAmelCase ) def a__ ( UpperCAmelCase : str ) -> List[Any]: import torch return isinstance(UpperCAmelCase , torch.Tensor ) def a__ ( UpperCAmelCase : str ) -> List[Any]: return False if not is_torch_available() else _is_torch(UpperCAmelCase ) def a__ ( UpperCAmelCase : Tuple ) -> List[str]: import torch return isinstance(UpperCAmelCase , torch.device ) def a__ ( UpperCAmelCase : Any ) -> Any: return False if not is_torch_available() else _is_torch_device(UpperCAmelCase ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: import torch if isinstance(UpperCAmelCase , UpperCAmelCase ): if hasattr(UpperCAmelCase , UpperCAmelCase ): UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase ) else: return False return isinstance(UpperCAmelCase , torch.dtype ) def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase ) def a__ ( UpperCAmelCase : Any ) -> str: import tensorflow as tf return isinstance(UpperCAmelCase , tf.Tensor ) def a__ ( UpperCAmelCase : int ) -> Union[str, Any]: return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[str] ) -> Tuple: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(UpperCAmelCase ) return type(UpperCAmelCase ) == tf.Tensor def a__ ( UpperCAmelCase : int ) -> List[Any]: return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[Any] ) -> Dict: import jax.numpy as jnp # noqa: F811 return isinstance(UpperCAmelCase , jnp.ndarray ) 
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]: return False if not is_flax_available() else _is_jax(UpperCAmelCase ) def a__ ( UpperCAmelCase : int ) -> Tuple: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return [to_py_obj(UpperCAmelCase ) for o in obj] elif is_tf_tensor(UpperCAmelCase ): return obj.numpy().tolist() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ).tolist() elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( UpperCAmelCase : Any ) -> List[str]: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return np.array(UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): return obj.numpy() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ) else: return obj class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Optional[Any] = fields(self ) # Safety and consistency checks if not len(__A ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) UpperCAmelCase : int = getattr(self, class_fields[0].name ) UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__A ): if isinstance(__A, __A ): UpperCAmelCase : Tuple = first_field.items() UpperCAmelCase : Any = True else: try: UpperCAmelCase : Optional[Any] = iter(__A ) UpperCAmelCase : Optional[Any] = True except TypeError: UpperCAmelCase : Optional[int] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__A ): if ( not isinstance(__A, (list, tuple) ) or not len(__A ) == 2 or not isinstance(element[0], __A ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self, element[0], element[1] ) if element[1] is not None: UpperCAmelCase : Union[str, Any] = element[1] elif first_field is not None: UpperCAmelCase : Union[str, Any] = first_field else: for field in class_fields: UpperCAmelCase : Optional[Any] = getattr(self, field.name ) if v is not None: UpperCAmelCase : Optional[int] = v def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ): raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ): raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : Any, *__A : Dict, **__A : str ): raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ): raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self : List[str], __A : List[str] ): if isinstance(__A, __A ): UpperCAmelCase : int = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__A, __A ) super().__setattr__(__A, __A ) def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ): # Will raise a KeyException if needed super().__setitem__(__A, __A ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__A, __A ) def __magic_name__ ( self : List[str] ): return tuple(self[k] for k in self.keys() ) class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): @classmethod def __magic_name__ ( cls : List[Any], __A : Tuple ): raise ValueError( F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """longest""" UpperCamelCase = """max_length""" UpperCamelCase = """do_not_pad""" class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """pt""" UpperCamelCase = """tf""" UpperCamelCase = """np""" UpperCamelCase = """jax""" class __UpperCAmelCase : def __init__( self : Any, __A : List[ContextManager] ): UpperCAmelCase : Tuple = context_managers UpperCAmelCase : Tuple = ExitStack() def __enter__( self : Any ): for context_manager in self.context_managers: self.stack.enter_context(__A ) def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ): self.stack.__exit__(*__A, **__A ) def a__ ( UpperCAmelCase : Union[str, Any] ) -> str: UpperCAmelCase : int = infer_framework(UpperCAmelCase ) if framework == "tf": UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ ( UpperCAmelCase : Dict ) -> Any: UpperCAmelCase : List[Any] = model_class.__name__ UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase ) if framework == "tf": UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase : Dict = 
inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]: def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ): for k, v in d.items(): UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k if v and isinstance(UpperCAmelCase , UpperCAmelCase ): yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items() else: yield key, v return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) @contextmanager def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]: if is_numpy_array(UpperCAmelCase ): return np.transpose(UpperCAmelCase , axes=UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.T if axes is None else array.permute(*UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]: if is_numpy_array(UpperCAmelCase ): return np.reshape(UpperCAmelCase , UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.reshape(*UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.reshape(UpperCAmelCase , UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.reshape(UpperCAmelCase , UpperCAmelCase ) else: raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any: if is_numpy_array(UpperCAmelCase ): return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str: if is_numpy_array(UpperCAmelCase ): return np.expand_dims(UpperCAmelCase , UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.unsqueeze(dim=UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_numpy_array(UpperCAmelCase ): return 
np.size(UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.numel() elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.size(UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return array.size else: raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict: for key, value in auto_map.items(): if isinstance(UpperCAmelCase , (tuple, list) ): UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase : List[Any] = f'''{repo_id}--{value}''' return auto_map def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]: for base_class in inspect.getmro(UpperCAmelCase ): UpperCAmelCase : Any = base_class.__module__ UpperCAmelCase : Dict = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'''Could not infer framework from class {model_class}.''' )
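# Hedged demo: every helper above is mangled to `a__`; upstream these live in
# `transformers.utils.generic` as transpose/reshape/squeeze/expand_dims/
# flatten_dict. Assuming those upstream names, a numpy-only check (numpy is the
# one backend that needs no optional dependency):
import numpy as np
from transformers.utils.generic import expand_dims, flatten_dict, reshape, squeeze, transpose

x = np.arange(6).reshape(2, 3)
assert transpose(x).shape == (3, 2)
assert reshape(x, (3, 2)).shape == (3, 2)
assert squeeze(np.ones((1, 2, 1))).shape == (2,)
assert expand_dims(x, 0).shape == (1, 2, 3)
assert flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}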
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    #
    # Expected Output from the vertex, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
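# Non-interactive sketch of the solver above; the 3-vertex graph is illustrative.
def _demo_floyd_warshall():
    inf = float("inf")
    example_graph = [
        [0.0, 2.0, inf],
        [inf, 0.0, 3.0],
        [1.0, inf, 0.0],
    ]
    dist, _ = floyd_warshall(example_graph, 3)
    assert dist[0][2] == 5.0  # 0 -> 1 -> 2
    assert dist[2][1] == 3.0  # 2 -> 0 -> 1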
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
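# Why test_full_tokenizer above expects ids [7, 4, 5, 10, 8, 9]: ids are simply
# positions in the toy vocab that setUp() writes to disk.
toy_vocab = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
assert [toy_vocab.index(t) for t in ["un", "##want", "##ed", ",", "runn", "##ing"]] == [7, 4, 5, 10, 8, 9]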
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
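# Hedged usage sketch: the builder above backs `load_dataset("audiofolder", ...)`.
# The directory path is an illustrative placeholder and must contain audio files
# (optionally with a metadata.csv):
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="./my_audio_clips")
#   ds["train"][0]["audio"]  # {"path": ..., "array": ..., "sampling_rate": ...}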
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Dict = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : Dict = patch_size UpperCAmelCase : int = num_channels UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : Union[str, Any] = use_labels UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Tuple = mask_ratio UpperCAmelCase : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCAmelCase : Tuple = (image_size // patch_size) ** 2 UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : str = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Optional[Any] ): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ): UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A ) UpperCAmelCase : Tuple = model(__A, training=__A ) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ): UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A ) UpperCAmelCase : int = model(__A, training=__A ) # expected sequence length = num_patches UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2 UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCAmelCase : Tuple = 1 UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A ) UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = model(__A, training=__A ) UpperCAmelCase : Union[str, Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Dict = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = TFViTMAEModelTester(self ) UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[str] = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) ) UpperCAmelCase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) ) def __magic_name__ ( self : str ): UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(__A ) UpperCAmelCase : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : int = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__A ) def __magic_name__ ( self : int ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) 
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : str = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : Dict = model(__A, noise=__A ) UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A ) UpperCAmelCase : Dict = outputs_dict[0].numpy() UpperCAmelCase : Tuple = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 ) def __magic_name__ ( self : Optional[Any] ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__A : Union[str, Any] ): UpperCAmelCase : str = {} for k, v in inputs_dict.items(): if tf.is_tensor(__A ): UpperCAmelCase : Tuple = v.numpy() else: UpperCAmelCase : str = np.array(__A ) return inputs_np_dict for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : Any = self._prepare_for_class(__A, __A ) UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A ) UpperCAmelCase : str = model(__A, noise=__A ) UpperCAmelCase : str = model(**__A, noise=__A ) self.assert_outputs_same(__A, __A ) def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ): # make masks reproducible np.random.seed(2 ) UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : int = tf.constant(__A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCAmelCase : List[Any] = tf_noise super().check_pt_tf_models(__A, __A, __A ) def __magic_name__ ( self : str ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Union[str, Any] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__A ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(__A, __A ),) if isinstance(__A, __A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__A, '''_keras_serializable''', __A ) } UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : str = tf.convert_to_tensor(__A ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: UpperCAmelCase : Tuple = main_layer_class(__A ) UpperCAmelCase : int = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) ) UpperCAmelCase : List[Any] = model(__A ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' ) model.save(__A ) UpperCAmelCase : List[str] = tf.keras.models.load_model( __A, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__A, tf.keras.Model ) UpperCAmelCase : Tuple = model(__A ) self.assert_outputs_same(__A, __A ) @slow def __magic_name__ ( self : Dict ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(__A ) UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A ) UpperCAmelCase : Union[str, Any] = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy() UpperCAmelCase : Union[str, Any] = 0 else: UpperCAmelCase : Optional[int] = outputs.logits.numpy() UpperCAmelCase : int = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A, saved_model=__A ) UpperCAmelCase : Dict = model_class.from_pretrained(__A ) UpperCAmelCase : str = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy() UpperCAmelCase : Dict = 0 else: UpperCAmelCase : Any = after_outputs['''logits'''].numpy() UpperCAmelCase : Dict = 0 UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__A, 1E-5 ) def __magic_name__ ( self : Optional[Any] ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : List[Any] = model(__A, noise=__A ) UpperCAmelCase : str = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(__A ) UpperCAmelCase : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCAmelCase : str = model_class.from_config(model.config ) UpperCAmelCase : List[str] = new_model(__A ) # Build model 
new_model.set_weights(model.get_weights() ) UpperCAmelCase : Tuple = new_model(__A, noise=__A ) self.assert_outputs_same(__A, __A ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def __magic_name__ ( self : Tuple ): pass @slow def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__A ) def a__ ( ) -> Dict: UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[str] ): return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def __magic_name__ ( self : str ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) UpperCAmelCase : List[str] = self.default_image_processor UpperCAmelCase : Any = prepare_img() UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCAmelCase : Optional[int] = ViTMAEConfig() UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCAmelCase : Optional[int] = model(**__A, noise=__A ) # verify the logits UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : List[str] = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
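# Numeric check of the masked sequence-length formula the tester above relies on:
# with image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225 patches, and
# with mask_ratio=0.6 the encoder keeps ceil(0.4 * (225 + 1)) = 91 tokens
# (the +1 accounts for the [CLS] token).
import math

num_patches = (30 // 2) ** 2
assert num_patches == 225
assert int(math.ceil((1 - 0.6) * (num_patches + 1))) == 91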
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class __UpperCAmelCase : def __init__( self : Union[str, Any], __A : int, __A : Optional[Any]=1_3, __A : Optional[int]=7, __A : int=True, __A : Dict=True, __A : str=True, __A : Dict=True, __A : Optional[Any]=9_9, __A : Tuple=3_2, __A : Optional[int]=2, __A : Any=4, __A : Dict=3_7, __A : Optional[int]="gelu", __A : Any=0.1, __A : List[str]=0.1, __A : Tuple=5_1_2, __A : Dict=1_6, __A : List[Any]=2, __A : int=0.0_2, __A : Dict=False, __A : Optional[Any]=True, __A : Tuple="None", __A : Tuple=3, __A : Dict=4, __A : Optional[Any]=None, ): UpperCAmelCase : List[str] = parent UpperCAmelCase : Any = batch_size UpperCAmelCase : Dict = seq_length UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : Optional[Any] = use_input_mask UpperCAmelCase : Dict = use_token_type_ids UpperCAmelCase : Union[str, Any] = use_labels UpperCAmelCase : List[str] = vocab_size UpperCAmelCase : Optional[Any] = hidden_size UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : Any = max_position_embeddings UpperCAmelCase : Tuple = type_vocab_size UpperCAmelCase : Tuple = type_sequence_label_size UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Union[str, Any] = num_labels UpperCAmelCase : str = num_choices UpperCAmelCase : Optional[Any] = relative_attention UpperCAmelCase : List[Any] = position_biased_input UpperCAmelCase : int = pos_att_type UpperCAmelCase : Union[str, Any] = scope def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[Any] = None if self.use_token_type_ids: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) UpperCAmelCase : List[Any] = None UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) UpperCAmelCase : str = DebertaVaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, 
initializer_range=self.initializer_range, return_dict=__A, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : Any, __A : List[Any], __A : int, __A : List[str], __A : Dict, __A : Optional[int], __A : Dict, __A : Tuple ): UpperCAmelCase : int = TFDebertaVaModel(config=__A ) UpperCAmelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase : Optional[Any] = [input_ids, input_mask] UpperCAmelCase : Optional[Any] = model(__A ) UpperCAmelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : str, __A : List[str], __A : List[str], __A : Optional[Any], __A : Tuple, __A : Optional[Any], __A : List[Any], __A : Dict ): UpperCAmelCase : List[Any] = TFDebertaVaForMaskedLM(config=__A ) UpperCAmelCase : Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase : Tuple = model(__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : Tuple, __A : Optional[int], __A : str, __A : str, __A : List[Any], __A : Optional[int], __A : Tuple, __A : Any ): UpperCAmelCase : str = self.num_labels UpperCAmelCase : List[Any] = TFDebertaVaForSequenceClassification(config=__A ) UpperCAmelCase : Union[str, Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase : Tuple = model(__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __magic_name__ ( self : int, __A : Optional[Any], __A : Optional[Any], __A : Any, __A : Any, __A : Tuple, __A : Union[str, Any], __A : int ): UpperCAmelCase : Dict = self.num_labels UpperCAmelCase : Any = TFDebertaVaForTokenClassification(config=__A ) UpperCAmelCase : str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase : Optional[int] = model(__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ ( self : Optional[int], __A : int, __A : Tuple, __A : int, __A : Any, __A : Dict, __A : List[Any], __A : Optional[Any] ): UpperCAmelCase : Tuple = TFDebertaVaForQuestionAnswering(config=__A ) UpperCAmelCase : List[Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def __magic_name__ ( self : str ): UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : str = config_and_inputs UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if 
is_tf_available() else () ) UpperCamelCase = ( { """feature-extraction""": TFDebertaVaModel, """fill-mask""": TFDebertaVaForMaskedLM, """question-answering""": TFDebertaVaForQuestionAnswering, """text-classification""": TFDebertaVaForSequenceClassification, """token-classification""": TFDebertaVaForTokenClassification, """zero-shot""": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Tuple ): UpperCAmelCase : int = TFDebertaVaModelTester(self ) UpperCAmelCase : Tuple = ConfigTester(self, config_class=__A, hidden_size=3_7 ) def __magic_name__ ( self : Any ): self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__A ) def __magic_name__ ( self : int ): UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__A ) def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__A ) @slow def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Optional[int] = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(__A ) @require_tf class __UpperCAmelCase ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def __magic_name__ ( self : Any ): pass @slow def __magic_name__ ( self : Tuple ): UpperCAmelCase : int = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) UpperCAmelCase : str = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase : int = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCAmelCase : Optional[int] = model(__A, attention_mask=__A )[0] UpperCAmelCase : int = tf.constant( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4], __A, atol=1E-4 )
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
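
# A quick sanity check of the routine above (hypothetical, not part of the
# original script): partition(m) counts the ways to write m as a sum of at
# least two positive integers, as in Project Euler 76.  For m = 5 those are
# 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1, i.e. six ways.
if __name__ == "__main__":
    assert partition(5) == 6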
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
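
# A small usage sketch (the import below is the public transformers API; the
# config class itself is the one defined above).  `attribute_map` resolves the
# generic attribute names onto the decoder-specific fields:
if __name__ == "__main__":
    from transformers import Speech2Text2Config

    config = Speech2Text2Config(decoder_layers=4, d_model=128)
    print(config.hidden_size)          # 128 -- mapped onto d_model
    print(config.num_attention_heads)  # 4   -- mapped onto decoder_attention_heads (default)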
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
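
# A quick check (hypothetical, not part of the original module): 0 marks an
# open cell and 1 a wall; the solver backtracks from the top-left to the
# bottom-right corner.
if __name__ == "__main__":
    small_maze = [
        [0, 1, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    solve_maze(small_maze)  # prints the 0/1 path matrix and returns True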
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __UpperCAmelCase : def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Any = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : List[str] = patch_size UpperCAmelCase : Dict = num_channels UpperCAmelCase : List[Any] = is_training UpperCAmelCase : Dict = use_labels UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : Union[str, Any] = num_hidden_layers UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Any = hidden_act UpperCAmelCase : Any = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : str = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : int = scope UpperCAmelCase : List[str] = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size UpperCAmelCase : str = (self.image_size // 3_2) ** 2 UpperCAmelCase : List[str] = num_patches + 1 def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : str = None if self.use_labels: UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Any ): UpperCAmelCase : Dict = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 1_6, 3_2], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, 
backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, ) def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ): UpperCAmelCase : int = ViTHybridModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ): UpperCAmelCase : str = self.type_sequence_label_size UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self : int ): UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs UpperCAmelCase : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () UpperCamelCase = ( {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Any = ViTHybridModelTester(self ) UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : int ): UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) UpperCAmelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, nn.Linear ) ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = model_class(__A ) UpperCAmelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : str = [*signature.parameters.keys()] UpperCAmelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Dict = _config_zero_init(__A ) for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(config=__A ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == 
"ViTHybridPatchEmbeddings": UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @slow def __magic_name__ ( self : List[str] ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> Tuple: UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : str ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __A ) UpperCAmelCase : Tuple = self.default_image_processor UpperCAmelCase : int = prepare_img() UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(**__A ) # verify the logits UpperCAmelCase : str = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow @require_accelerate def __magic_name__ ( self : Dict ): UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' ) UpperCAmelCase : Tuple = prepare_img() UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' ) UpperCAmelCase : Dict = model(**__A ) UpperCAmelCase : Any = outputs.logits # model predicts one of the 1000 ImageNet classes UpperCAmelCase : Dict = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : def __init__( self : Tuple, __A : int, __A : List[Any]=1_3, __A : Optional[int]=3_0, __A : int=2, __A : List[str]=3, __A : List[Any]=True, __A : List[Any]=True, __A : Optional[Any]=3_2, __A : Any=2, __A : Tuple=4, __A : Dict=3_7, __A : List[str]="gelu", __A : str=0.1, __A : Dict=0.1, __A : int=1_0, __A : Union[str, Any]=0.0_2, __A : List[Any]=3, __A : Any=None, ): UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : Tuple = batch_size UpperCAmelCase : Union[str, Any] = image_size UpperCAmelCase : List[str] = patch_size UpperCAmelCase : Union[str, Any] = num_channels UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : str = use_labels UpperCAmelCase : Any = hidden_size UpperCAmelCase : int = num_hidden_layers UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Union[str, Any] = hidden_act UpperCAmelCase : Optional[int] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : Tuple = type_sequence_label_size UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : Dict = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2 UpperCAmelCase : Optional[Any] = num_patches + 1 def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Optional[int] = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : List[Any] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Optional[Any] ): return ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, ) def __magic_name__ ( self : Union[str, Any], __A : Union[str, Any], __A : str, __A : int ): UpperCAmelCase : Dict = TFViTModel(config=__A ) UpperCAmelCase : Optional[int] = model(__A, training=__A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
UpperCAmelCase : Any = self.image_size // 2 UpperCAmelCase : Tuple = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase : Any = model(__A, interpolate_pos_encoding=__A, training=__A ) UpperCAmelCase : List[str] = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size) ) def __magic_name__ ( self : Optional[Any], __A : List[Any], __A : int, __A : int ): UpperCAmelCase : Union[str, Any] = self.type_sequence_label_size UpperCAmelCase : Dict = TFViTForImageClassification(__A ) UpperCAmelCase : str = model(__A, labels=__A, training=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase : List[Any] = self.image_size // 2 UpperCAmelCase : List[str] = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase : Any = model(__A, interpolate_pos_encoding=__A, training=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase : Union[str, Any] = 1 UpperCAmelCase : Union[str, Any] = TFViTForImageClassification(__A ) UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : Optional[int] = model(__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Any = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = config_and_inputs UpperCAmelCase : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCamelCase = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Dict = TFViTModelTester(self ) UpperCAmelCase : Dict = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __magic_name__ ( self : Tuple ): pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __magic_name__ ( self : Dict ): pass def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Tuple = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) ) UpperCAmelCase : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[int] = model_class(__A ) UpperCAmelCase : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Tuple = [*signature.parameters.keys()] UpperCAmelCase : int = ['''pixel_values'''] 
self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : Any ): UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @slow def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Optional[Any] = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__A ) def a__ ( ) -> Any: UpperCAmelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : Any ): return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def __magic_name__ ( self : List[str] ): UpperCAmelCase : str = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) UpperCAmelCase : int = self.default_image_processor UpperCAmelCase : int = prepare_img() UpperCAmelCase : Any = image_processor(images=__A, return_tensors='''tf''' ) # forward pass UpperCAmelCase : Tuple = model(**__A ) # verify the logits UpperCAmelCase : List[str] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Optional[int] = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3], __A, atol=1E-4 )
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
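
# A deterministic example (not in the original module): with [1, 2, 3, 4, 5]
# and target 9, the two-pointer version fixes arr[0] = 1 and then moves the
# pointers inward until 1 + 3 + 5 == 9.
if __name__ == "__main__":
    print(triplet_sum2([1, 2, 3, 4, 5], 9))  # (1, 3, 5)
    print(triplet_sum1([1, 2, 3, 4, 5], 9))  # (1, 3, 5) as well, via permutations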
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] ) -> str: # Load configuration defined in the metadata file with open(UpperCAmelCase ) as metadata_file: UpperCAmelCase : int = json.load(UpperCAmelCase ) UpperCAmelCase : Dict = LukeConfig(use_entity_aware_attention=UpperCAmelCase , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path UpperCAmelCase : Dict = torch.load(UpperCAmelCase , map_location='''cpu''' )['''module'''] # Load the entity vocab file UpperCAmelCase : Optional[Any] = load_original_entity_vocab(UpperCAmelCase ) # add an entry for [MASK2] UpperCAmelCase : List[Any] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 UpperCAmelCase : Optional[int] = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks UpperCAmelCase : Tuple = AddedToken('''<ent>''' , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) UpperCAmelCase : str = AddedToken('''<ent2>''' , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , '''r''' ) as f: UpperCAmelCase : Any = json.load(UpperCAmelCase ) UpperCAmelCase : List[Any] = '''MLukeTokenizer''' with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(UpperCAmelCase , UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(UpperCAmelCase , UpperCAmelCase ) UpperCAmelCase : str = MLukeTokenizer.from_pretrained(UpperCAmelCase ) # Initialize the embeddings of the special tokens UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(['''@'''] )[0] UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(['''#'''] )[0] UpperCAmelCase : Dict = state_dict['''embeddings.word_embeddings.weight'''] UpperCAmelCase : List[Any] = word_emb[ent_init_index].unsqueeze(0 ) UpperCAmelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 ) UpperCAmelCase : Any = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: UpperCAmelCase : Optional[Any] = state_dict[bias_name] UpperCAmelCase : Dict = decoder_bias[ent_init_index].unsqueeze(0 ) UpperCAmelCase : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 ) UpperCAmelCase : List[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: UpperCAmelCase : List[str] = f'''encoder.layer.{layer_index}.attention.self.''' UpperCAmelCase : Optional[int] = state_dict[prefix + matrix_name] UpperCAmelCase : Tuple = state_dict[prefix + matrix_name] UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name] # Initialize the 
embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight'''] UpperCAmelCase : Tuple = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) UpperCAmelCase : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' UpperCAmelCase : Optional[Any] = state_dict['''entity_predictions.bias'''] UpperCAmelCase : List[Any] = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) UpperCAmelCase : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] ) UpperCAmelCase : int = LukeForMaskedLM(config=UpperCAmelCase ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) UpperCAmelCase : List[Any] = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): UpperCAmelCase : Tuple = state_dict[key] else: UpperCAmelCase : Any = state_dict[key] UpperCAmelCase , UpperCAmelCase : Dict = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) if set(UpperCAmelCase ) != {"luke.embeddings.position_ids"}: raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(UpperCAmelCase ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs UpperCAmelCase : Optional[Any] = MLukeTokenizer.from_pretrained(UpperCAmelCase , task='''entity_classification''' ) UpperCAmelCase : List[str] = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' UpperCAmelCase : Optional[int] = (0, 9) UpperCAmelCase : List[Any] = tokenizer(UpperCAmelCase , entity_spans=[span] , return_tensors='''pt''' ) UpperCAmelCase : Dict = model(**UpperCAmelCase ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base UpperCAmelCase : List[Any] = torch.Size((1, 33, 768) ) UpperCAmelCase : Optional[int] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base UpperCAmelCase : Tuple = torch.Size((1, 1, 768) ) UpperCAmelCase : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction UpperCAmelCase : Any = MLukeTokenizer.from_pretrained(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = '''Tokyo is the capital of <mask>.''' UpperCAmelCase : List[Any] = (24, 30) UpperCAmelCase : 
Optional[int] = tokenizer(UpperCAmelCase , entity_spans=[span] , return_tensors='''pt''' ) UpperCAmelCase : Dict = model(**UpperCAmelCase ) UpperCAmelCase : Union[str, Any] = encoding['''input_ids'''][0].tolist() UpperCAmelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) UpperCAmelCase : Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(UpperCAmelCase ) UpperCAmelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item() UpperCAmelCase : str = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCAmelCase ) ) model.save_pretrained(UpperCAmelCase ) def a__ ( UpperCAmelCase : Optional[int] ) -> str: UpperCAmelCase : Optional[Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] UpperCAmelCase : Optional[Any] = [json.loads(UpperCAmelCase ) for line in open(UpperCAmelCase )] UpperCAmelCase : List[Any] = {} for entry in data: UpperCAmelCase : List[Any] = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: UpperCAmelCase : str = entity_id break UpperCAmelCase : Tuple = f'''{language}:{entity_name}''' UpperCAmelCase : Any = entity_id return new_mapping if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) _lowerCamelCase : List[Any] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    def put(self, value):
        raise NotImplementedError()

    def end(self):
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
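
# A minimal usage sketch (the "gpt2" checkpoint is just an example; the
# `streamer=` argument of `generate` is the integration point for the classes
# above): tokens are pushed into the streamer as they are produced and printed
# word by word.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")
    streamer = TextStreamer(tokenizer, skip_prompt=True)
    model.generate(**inputs, streamer=streamer, max_new_tokens=20)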
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
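
# Example output (a quick check, not part of the original file):
if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=3))
    # 5 * 1 = 5
    # 5 * 2 = 10
    # 5 * 3 = 15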
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
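
# For reference, the loop above implements plain batch gradient descent on the
# linear hypothesis h(x) = theta_0 + theta_1 * x_1 + theta_2 * x_2 + theta_3 * x_3:
#
#     theta_i <- theta_i - LEARNING_RATE * (1 / m) * sum_j (h(x_j) - y_j) * x_j[i]
#
# with x_j[-1] treated as the constant 1, so the same rule updates the bias
# theta_0 (which is why `get_cost_derivative` is called with `i - 1`).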
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        # union by rank; returns True if a merge actually happened
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        # find with path compression
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
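
# A short demonstration (hypothetical, not part of the original module): three
# disjoint sets of sizes 1, 2 and 3 are merged while `max_set` tracks the
# largest set seen so far.
if __name__ == "__main__":
    ds = DisjointSet([1, 2, 3])
    ds.merge(0, 1)
    print(ds.max_set)  # 3
    ds.merge(1, 2)
    print(ds.max_set)  # 6
    print(ds.get_parent(0) == ds.get_parent(2))  # True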
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
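
# Quick sanity checks (hypothetical, not in the original script): the probe
# index is interpolated from the target value, which is what makes the search
# fast on uniformly distributed sorted data.
if __name__ == "__main__":
    data = [10, 20, 30, 40, 50, 60, 70]
    print(interpolation_search(data, 50))  # 4
    print(interpolation_search(data, 55))  # None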
import os


def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    columns = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(columns)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, columns):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
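
# A standalone check against the Project Euler 82 example grid (the 994 figure
# comes from the problem statement; writing a file next to the module is a
# hypothetical convenience, not part of the original script):
if __name__ == "__main__":
    example = (
        "131,673,234,103,18\n"
        "201,96,342,965,150\n"
        "630,803,746,422,111\n"
        "537,699,497,121,956\n"
        "805,732,524,37,331\n"
    )
    with open(os.path.join(os.path.dirname(__file__), "example.txt"), "w") as f:
        f.write(example)
    print(solution("example.txt"))  # expected: 994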
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any: UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else '''''' UpperCAmelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''), (f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''), (f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''), (f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any: for i in range(config.num_hidden_layers ): UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else '''''' # queries, keys and values UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) UpperCAmelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase : str = q_bias UpperCAmelCase : List[str] = in_proj_weight[ 
config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase : int = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) UpperCAmelCase : str = gamma_a UpperCAmelCase : Dict = gamma_a def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]: UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase ) UpperCAmelCase : str = val def a__ ( ) -> Optional[int]: UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]: UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCAmelCase : List[Any] = 1_024 UpperCAmelCase : Optional[Any] = 4_096 UpperCAmelCase : Any = 24 UpperCAmelCase : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: UpperCAmelCase : Optional[Any] = 16 UpperCAmelCase : List[Any] = '''huggingface/label-files''' UpperCAmelCase : Any = '''rvlcdip-id2label.json''' UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : Union[str, Any] = idalabel UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model'''] UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase ) for src, dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase ) # load HuggingFace model UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase ) model.eval() model.load_state_dict(UpperCAmelCase ) # Check outputs on an image UpperCAmelCase : Dict = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase ) UpperCAmelCase : List[str] = prepare_img() UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' ) UpperCAmelCase : str = encoding['''pixel_values'''] UpperCAmelCase : Any = model(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = outputs.logits # verify logits UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192] assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected" Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if 
push_to_hub: if has_lm_head: UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , ) if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
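# Hedged illustration of the qkv-splitting step performed by read_in_q_k_v above:
# timm-style BEiT/DiT checkpoints store a single fused `qkv` projection of shape
# (3 * hidden_size, hidden_size), which the HF layers consume as separate
# query/key/value matrices (BEiT's key projection carries no bias, which is why
# only q_bias and v_bias are popped above). `hidden` and `fused_qkv` below are
# illustrative stand-ins, not names from the script.
import torch

hidden = 4
fused_qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = fused_qkv[:hidden, :]              # first third  -> query weight
k_w = fused_qkv[hidden : 2 * hidden, :]  # middle third -> key weight
v_w = fused_qkv[-hidden:, :]             # last third   -> value weight
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_qkv)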
336
1
from abc import ABC, abstractmethod from typing import List, Optional class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : List[str] ): # test for the above condition self.test() def __magic_name__ ( self : int ): UpperCAmelCase : Any = 0 UpperCAmelCase : Union[str, Any] = False while not completed: if counter == 1: self.reset() UpperCAmelCase : str = self.advance() if not self.does_advance(__A ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.update(__A ) counter += 1 if counter > 1_0_0_0_0: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def __magic_name__ ( self : Dict ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __magic_name__ ( self : int, __A : int ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __magic_name__ ( self : Dict, __A : int ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __magic_name__ ( self : Optional[Any] ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __magic_name__ ( self : Any ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __magic_name__ ( self : List[Any], __A : int=False ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Union[str, Any], __A : List[int] ): super(__A, self ).__init__() if not isinstance(__A, __A ) or len(__A ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(__A, __A ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase : List[Any] = token_ids UpperCAmelCase : List[str] = len(self.token_ids ) UpperCAmelCase : List[str] = -1 # the index of the currently fulfilled step UpperCAmelCase : Dict = False def __magic_name__ ( self : str ): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __magic_name__ ( self : Union[str, Any], __A : int ): if not isinstance(__A, __A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(__A )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __magic_name__ ( self : str, __A : int ): if not isinstance(__A, __A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(__A )}''' ) UpperCAmelCase : int = False UpperCAmelCase : str = False UpperCAmelCase : int = False if self.does_advance(__A ): self.fulfilled_idx += 1 UpperCAmelCase : Union[str, Any] = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase : int = True UpperCAmelCase : Any = completed else: # failed to make progress. 
UpperCAmelCase : Optional[int] = True self.reset() return stepped, completed, reset def __magic_name__ ( self : Tuple ): UpperCAmelCase : List[Any] = False UpperCAmelCase : List[Any] = 0 def __magic_name__ ( self : Dict ): return self.seqlen - (self.fulfilled_idx + 1) def __magic_name__ ( self : Dict, __A : str=False ): UpperCAmelCase : Optional[int] = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase : Union[str, Any] = self.seqlen UpperCAmelCase : str = self.fulfilled_idx UpperCAmelCase : Union[str, Any] = self.completed return new_constraint class __UpperCAmelCase : def __init__( self : Optional[Any], __A : List[List[int]], __A : Tuple=True ): UpperCAmelCase : Union[str, Any] = max([len(__A ) for one in nested_token_ids] ) UpperCAmelCase : int = {} for token_ids in nested_token_ids: UpperCAmelCase : str = root for tidx, token_id in enumerate(__A ): if token_id not in level: UpperCAmelCase : Union[str, Any] = {} UpperCAmelCase : int = level[token_id] if no_subsets and self.has_subsets(__A, __A ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' F''' {nested_token_ids}.''' ) UpperCAmelCase : str = root def __magic_name__ ( self : Union[str, Any], __A : Any ): UpperCAmelCase : Union[str, Any] = self.trie for current_token in current_seq: UpperCAmelCase : Optional[int] = start[current_token] UpperCAmelCase : int = list(start.keys() ) return next_tokens def __magic_name__ ( self : Dict, __A : Optional[int] ): UpperCAmelCase : Tuple = self.next_tokens(__A ) return len(__A ) == 0 def __magic_name__ ( self : Optional[Any], __A : int ): UpperCAmelCase : List[Any] = list(root.values() ) if len(__A ) == 0: return 1 else: return sum([self.count_leaves(__A ) for nn in next_nodes] ) def __magic_name__ ( self : Union[str, Any], __A : Optional[int], __A : str ): UpperCAmelCase : Optional[Any] = self.count_leaves(__A ) return len(__A ) != leaf_count class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : List[str], __A : List[List[int]] ): super(__A, self ).__init__() if not isinstance(__A, __A ) or len(__A ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(__A, __A ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(__A, __A ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase : Union[str, Any] = DisjunctiveTrie(__A ) UpperCAmelCase : Any = nested_token_ids UpperCAmelCase : Tuple = self.trie.max_height UpperCAmelCase : List[str] = [] UpperCAmelCase : Optional[int] = False def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Any = self.trie.next_tokens(self.current_seq ) if len(__A ) == 0: return None else: return token_list def __magic_name__ ( self : Union[str, Any], __A : int ): if not isinstance(__A, __A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__A )}''' ) UpperCAmelCase : List[Any] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __magic_name__ ( self : str, __A : int ): if not isinstance(__A, __A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__A )}''' ) UpperCAmelCase : int = False UpperCAmelCase : str = False 
UpperCAmelCase : Optional[Any] = False if self.does_advance(__A ): self.current_seq.append(__A ) UpperCAmelCase : str = True else: UpperCAmelCase : str = True self.reset() UpperCAmelCase : Optional[int] = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase : Dict = completed return stepped, completed, reset def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Dict = False UpperCAmelCase : List[Any] = [] def __magic_name__ ( self : int ): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __magic_name__ ( self : Optional[Any], __A : Union[str, Any]=False ): UpperCAmelCase : Tuple = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase : Tuple = self.seqlen UpperCAmelCase : int = self.current_seq UpperCAmelCase : Optional[int] = self.completed return new_constraint class __UpperCAmelCase : def __init__( self : Tuple, __A : List[Constraint] ): UpperCAmelCase : Dict = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase : List[str] = max([c.seqlen for c in constraints] ) UpperCAmelCase : Optional[Any] = len(__A ) UpperCAmelCase : str = False self.init_state() def __magic_name__ ( self : Any ): UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : int = [constraint.copy(stateful=__A ) for constraint in self.constraints] def __magic_name__ ( self : Dict ): UpperCAmelCase : Any = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : int = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase : str = constraint.advance() if isinstance(__A, __A ): token_list.append(__A ) elif isinstance(__A, __A ): token_list.extend(__A ) else: UpperCAmelCase : List[Any] = self.inprogress_constraint.advance() if isinstance(__A, __A ): token_list.append(__A ) elif isinstance(__A, __A ): token_list.extend(__A ) if len(__A ) == 0: return None else: return token_list def __magic_name__ ( self : Any, __A : Optional[List[int]] ): self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase , UpperCAmelCase : str = self.add(__A ) # the entire list of constraints are fulfilled if self.completed: break def __magic_name__ ( self : int, __A : int ): if not isinstance(__A, __A ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase , UpperCAmelCase : int = False, False if self.completed: UpperCAmelCase : Optional[int] = True UpperCAmelCase : Optional[int] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.inprogress_constraint.update(__A ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. 
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__A ) ) UpperCAmelCase : str = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase : Optional[int] = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase : List[str] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(__A ): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = pending_constraint.update(__A ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(__A ) UpperCAmelCase : Optional[Any] = None if not complete and stepped: UpperCAmelCase : Tuple = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase : str = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase : List[str] = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __magic_name__ ( self : Any, __A : int=True ): UpperCAmelCase : Optional[Any] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase : Optional[int] = [ constraint.copy(stateful=__A ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase : int = self.inprogress_constraint.copy(stateful=__A ) UpperCAmelCase : Tuple = [constraint.copy() for constraint in self.pending_constraints] return new_state
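# A toy trace of the (stepped, completed, reset) protocol that the update()
# methods above implement. In transformers the concrete classes are
# PhrasalConstraint, DisjunctiveConstraint and ConstraintListState; `ToyPhrase`
# below is a made-up minimal reimplementation for illustration only, not the
# library API.
class ToyPhrase:
    def __init__(self, ids):
        self.ids, self.fulfilled_idx, self.completed = ids, -1, False

    def update(self, token_id):
        # advance on the expected next token, otherwise reset all progress
        if not self.completed and token_id == self.ids[self.fulfilled_idx + 1]:
            self.fulfilled_idx += 1
            self.completed = self.fulfilled_idx == len(self.ids) - 1
            return True, self.completed, False  # stepped, completed, reset
        self.fulfilled_idx, self.completed = -1, False
        return False, False, True

c = ToyPhrase([5, 9, 2])
print(c.update(5))  # (True, False, False)  - made progress
print(c.update(9))  # (True, False, False)
print(c.update(2))  # (True, True, False)   - phrase fulfilled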
336
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __UpperCAmelCase ( unittest.TestCase ): def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ): UpperCAmelCase : Any = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : Any = seq_length UpperCAmelCase : Tuple = is_training UpperCAmelCase : str = use_attention_mask UpperCAmelCase : List[str] = use_token_type_ids UpperCAmelCase : int = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : str = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Optional[Any] = initializer_range UpperCAmelCase : Any = num_choices def __magic_name__ ( self : str ): UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : List[Any] = None if self.use_attention_mask: UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Any = None if self.use_token_type_ids: UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) UpperCAmelCase : Union[str, Any] = RobertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def __magic_name__ ( self : int ): UpperCAmelCase : Any = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs UpperCAmelCase : Any = True UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 
self.seq_length, self.hidden_size] ) UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Dict = FlaxRobertaModelTester(self ) @slow def __magic_name__ ( self : Any ): for model_class_name in self.all_model_classes: UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A ) UpperCAmelCase : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
336
1
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
    UpperCamelCase = CpmAntTokenizer
    UpperCamelCase = False

    def __magic_name__ ( self : Dict ):
        super().setUp()
        UpperCAmelCase : Optional[int] = [
            '''<d>''',
            '''</d>''',
            '''<s>''',
            '''</s>''',
            '''</_>''',
            '''<unk>''',
            '''<pad>''',
            '''</n>''',
            '''我''',
            '''是''',
            '''C''',
            '''P''',
            '''M''',
            '''A''',
            '''n''',
            '''t''',
        ]
        UpperCAmelCase : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    @tooslow
    def __magic_name__ ( self : Any ):
        UpperCAmelCase : List[str] = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
        UpperCAmelCase : Union[str, Any] = '''今天天气真好!'''
        UpperCAmelCase : Optional[Any] = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        UpperCAmelCase : str = tokenizer.tokenize(__A )
        self.assertListEqual(__A, __A )
        UpperCAmelCase : Dict = '''今天天气真好!'''
        UpperCAmelCase : Optional[int] = [tokenizer.bos_token] + tokens
        UpperCAmelCase : Optional[Any] = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), __A )
        UpperCAmelCase : List[str] = tokenizer.decode(__A )
        self.assertEqual(__A, __A )
336
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Dict = {"vocab_file": "vocab.txt"} _lowerCamelCase : List[str] = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } _lowerCamelCase : List[Any] = { "facebook/esm2_t6_8M_UR50D": 1_0_2_4, "facebook/esm2_t12_35M_UR50D": 1_0_2_4, } def a__ ( UpperCAmelCase : List[str] ) -> Any: with open(UpperCAmelCase , '''r''' ) as f: UpperCAmelCase : Dict = f.read().splitlines() return [l.strip() for l in lines] class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ): super().__init__(**__A ) UpperCAmelCase : Tuple = load_vocab_file(__A ) UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) ) UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )} UpperCAmelCase : Union[str, Any] = unk_token UpperCAmelCase : Optional[Any] = cls_token UpperCAmelCase : Optional[int] = pad_token UpperCAmelCase : Optional[int] = mask_token UpperCAmelCase : List[str] = eos_token UpperCAmelCase : Optional[Any] = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __magic_name__ ( self : Tuple, __A : int ): return self._id_to_token.get(__A, self.unk_token ) def __magic_name__ ( self : List[Any], __A : str ): return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) ) def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ): return text.split() def __magic_name__ ( self : Optional[int], __A : Dict=False ): return len(self._id_to_token ) def __magic_name__ ( self : int ): return {token: i for i, token in enumerate(self.all_tokens )} def __magic_name__ ( self : Tuple, __A : str ): return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) ) def __magic_name__ ( self : Any, __A : int ): return self._id_to_token.get(__A, self.unk_token ) def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ): UpperCAmelCase : Optional[int] = [self.cls_token_id] UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1] if token_ids_a is 
not None: mask += [0] * len(__A ) + [1] return mask def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ): UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(__A, '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __magic_name__ ( self : Dict ): return self.get_vocab_size(with_added_tokens=__A ) def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ): return super()._add_tokens(__A, special_tokens=__A )
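# Minimal sketch of the plain-text vocab round trip this tokenizer builds on:
# ESM vocab files are one token per line, so the id<->token tables are just the
# enumerated, stripped lines. The file name and tokens below are invented for
# the demo; only the loading logic mirrors load_vocab_file above.
import os
import tempfile

demo_tokens = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V"]
with tempfile.TemporaryDirectory() as d:
    path = os.path.join(d, "vocab.txt")
    with open(path, "w") as f:
        f.write("\n".join(demo_tokens))
    with open(path, "r") as f:
        all_tokens = [l.strip() for l in f.read().splitlines()]
    id_to_token = dict(enumerate(all_tokens))
    token_to_id = {tok: ind for ind, tok in id_to_token.items()}
    assert token_to_id["L"] == 4 and id_to_token[0] == "<cls>"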
336
1
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters _lowerCamelCase : str = (7_2_0, 1_2_8_0) # Height, Width _lowerCamelCase : Dict = (0.4, 0.6) # if height or width lower than this scale, drop it. _lowerCamelCase : List[str] = 1 / 1_0_0 _lowerCamelCase : List[str] = "" _lowerCamelCase : Tuple = "" _lowerCamelCase : Optional[Any] = "" _lowerCamelCase : Tuple = 2_5_0 def a__ ( ) -> None: UpperCAmelCase , UpperCAmelCase : int = get_dataset(UpperCAmelCase , UpperCAmelCase ) for index in range(UpperCAmelCase ): UpperCAmelCase : str = random.sample(range(len(UpperCAmelCase ) ) , 4 ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = update_image_and_anno( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , filter_scale=UpperCAmelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCAmelCase : Dict = random_chars(32 ) UpperCAmelCase : Union[str, Any] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0] UpperCAmelCase : Optional[int] = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}''' cva.imwrite(f'''{file_root}.jpg''' , UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' ) UpperCAmelCase : Tuple = [] for anno in new_annos: UpperCAmelCase : str = anno[3] - anno[1] UpperCAmelCase : Tuple = anno[4] - anno[2] UpperCAmelCase : Dict = anno[1] + width / 2 UpperCAmelCase : Any = anno[2] + height / 2 UpperCAmelCase : List[str] = f'''{anno[0]} {x_center} {y_center} {width} {height}''' annos_list.append(UpperCAmelCase ) with open(f'''{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : str ) -> tuple[list, list]: UpperCAmelCase : str = [] UpperCAmelCase : Any = [] for label_file in glob.glob(os.path.join(UpperCAmelCase , '''*.txt''' ) ): UpperCAmelCase : str = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(UpperCAmelCase ) as in_file: UpperCAmelCase : Tuple = in_file.readlines() UpperCAmelCase : int = os.path.join(UpperCAmelCase , f'''{label_name}.jpg''' ) UpperCAmelCase : Any = [] for obj_list in obj_lists: UpperCAmelCase : int = obj_list.rstrip('''\n''' ).split(''' ''' ) UpperCAmelCase : Optional[Any] = float(obj[1] ) - float(obj[3] ) / 2 UpperCAmelCase : Optional[int] = float(obj[2] ) - float(obj[4] ) / 2 UpperCAmelCase : Any = float(obj[1] ) + float(obj[3] ) / 2 UpperCAmelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(UpperCAmelCase ) labels.append(UpperCAmelCase ) return img_paths, labels def a__ ( UpperCAmelCase : list , UpperCAmelCase : list , UpperCAmelCase : list[int] , UpperCAmelCase : tuple[int, int] , UpperCAmelCase : tuple[float, float] , UpperCAmelCase : float = 0.0 , ) -> tuple[list, list, str]: UpperCAmelCase : List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) UpperCAmelCase : int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) UpperCAmelCase : Union[str, Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) UpperCAmelCase : Optional[int] = int(scale_x * output_size[1] ) UpperCAmelCase : Any = int(scale_y * output_size[0] ) UpperCAmelCase : Tuple = [] UpperCAmelCase : Any = [] for i, index in enumerate(UpperCAmelCase ): UpperCAmelCase : List[str] = all_img_list[index] path_list.append(UpperCAmelCase ) 
UpperCAmelCase : Dict = all_annos[index] UpperCAmelCase : Tuple = cva.imread(UpperCAmelCase ) if i == 0: # top-left UpperCAmelCase : Dict = cva.resize(UpperCAmelCase , (divid_point_x, divid_point_y) ) UpperCAmelCase : Dict = img for bbox in img_annos: UpperCAmelCase : List[Any] = bbox[1] * scale_x UpperCAmelCase : Tuple = bbox[2] * scale_y UpperCAmelCase : Union[str, Any] = bbox[3] * scale_x UpperCAmelCase : Any = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right UpperCAmelCase : List[Any] = cva.resize(UpperCAmelCase , (output_size[1] - divid_point_x, divid_point_y) ) UpperCAmelCase : Tuple = img for bbox in img_annos: UpperCAmelCase : List[Any] = scale_x + bbox[1] * (1 - scale_x) UpperCAmelCase : int = bbox[2] * scale_y UpperCAmelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x) UpperCAmelCase : Any = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left UpperCAmelCase : Dict = cva.resize(UpperCAmelCase , (divid_point_x, output_size[0] - divid_point_y) ) UpperCAmelCase : Optional[Any] = img for bbox in img_annos: UpperCAmelCase : Union[str, Any] = bbox[1] * scale_x UpperCAmelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y) UpperCAmelCase : Any = bbox[3] * scale_x UpperCAmelCase : Tuple = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right UpperCAmelCase : List[str] = cva.resize( UpperCAmelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) UpperCAmelCase : int = img for bbox in img_annos: UpperCAmelCase : Union[str, Any] = scale_x + bbox[1] * (1 - scale_x) UpperCAmelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y) UpperCAmelCase : int = scale_x + bbox[3] * (1 - scale_x) UpperCAmelCase : Optional[int] = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: UpperCAmelCase : Union[str, Any] = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def a__ ( UpperCAmelCase : int ) -> str: assert number_char > 1, "The number of character should greater than 1" UpperCAmelCase : List[str] = ascii_lowercase + digits return "".join(random.choice(UpperCAmelCase ) for _ in range(UpperCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
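# Spot-check of the mosaic bbox arithmetic used above, with made-up numbers.
# For the top-left tile both coordinates shrink by the tile's scale factors;
# for the top-right tile the x coordinates are additionally shifted past the
# divide point. All boxes stay in normalized [0, 1] output coordinates.
scale_x, scale_y = 0.5, 0.4
label, xmin, ymin, xmax, ymax = 0, 0.2, 0.5, 0.8, 0.9

top_left = (label, xmin * scale_x, ymin * scale_y, xmax * scale_x, ymax * scale_y)
top_right = (label, scale_x + xmin * (1 - scale_x), ymin * scale_y,
             scale_x + xmax * (1 - scale_x), ymax * scale_y)
print(top_left)   # approximately (0, 0.1, 0.2, 0.4, 0.36)
print(top_right)  # approximately (0, 0.6, 0.2, 0.9, 0.36)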
336
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) ) self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) ) class __UpperCAmelCase : def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ): UpperCAmelCase : Optional[int] = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : List[str] = num_channels UpperCAmelCase : str = image_size UpperCAmelCase : Optional[int] = depth_multiplier UpperCAmelCase : Union[str, Any] = depth_divisible_by UpperCAmelCase : Optional[Any] = min_depth UpperCAmelCase : List[str] = expand_ratio UpperCAmelCase : Dict = tf_padding UpperCAmelCase : str = output_stride UpperCAmelCase : Union[str, Any] = first_layer_is_expansion UpperCAmelCase : List[Any] = finegrained_output UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) UpperCAmelCase : Optional[Any] = classifier_dropout_prob UpperCAmelCase : Dict = use_labels UpperCAmelCase : List[str] = is_training UpperCAmelCase : Tuple = num_labels UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : Any = scope def __magic_name__ ( self : List[Any] ): UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Dict = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels ) UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__ ( self : Any ): return MobileNetVaConfig( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def __magic_name__ ( self : List[Any], __A : Dict, __A : 
Optional[Any], __A : Optional[int], __A : Union[str, Any] ): UpperCAmelCase : Any = MobileNetVaModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Optional[Any] = model(__A ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) self.parent.assertEqual( result.pooler_output.shape, (self.batch_size, self.last_hidden_size), ) def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ): UpperCAmelCase : Optional[int] = self.num_labels UpperCAmelCase : Any = MobileNetVaForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Optional[int] = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ): UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCAmelCase : Optional[Any] = model(__A, labels=__A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) UpperCamelCase = ( { """feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification, """image-segmentation""": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = MobileNetVaModelTester(self ) UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A ) def __magic_name__ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def __magic_name__ ( self : Tuple ): pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def __magic_name__ ( self : Any ): pass def __magic_name__ ( self : Optional[int] ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(__A ) UpperCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def 
__magic_name__ ( self : List[Any] ): UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : int ): def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ): UpperCAmelCase : Union[str, Any] = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Optional[Any] = outputs.hidden_states UpperCAmelCase : List[Any] = 1_6 self.assertEqual(len(__A ), __A ) UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Tuple = True check_hidden_states_output(__A, __A, __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Tuple = True check_hidden_states_output(__A, __A, __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : int ): UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__A ) @slow def __magic_name__ ( self : Dict ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> int: UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[Any] ): return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A ) UpperCAmelCase : Optional[int] = self.default_image_processor UpperCAmelCase : Optional[Any] = prepare_img() UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : str = model(**__A ) # verify the logits UpperCAmelCase : int = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) UpperCAmelCase : List[Any] = model.to(__A ) UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) UpperCAmelCase : List[Any] = prepare_img() UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Union[str, Any] = model(**__A ) UpperCAmelCase : Optional[Any] = outputs.logits # verify the logits UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) ) self.assertEqual(logits.shape, __A ) UpperCAmelCase : Tuple = torch.tensor( [ [[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]], [[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, 
-2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]], [[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]], ], device=__A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
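# Sanity check of the spatial shapes asserted in the integration test above:
# with output_stride 8, a 513x513 DeepLab-style input yields 65x65 logits under
# the common (size - 1) // stride + 1 convention, while the toy model tester
# uses plain image_size // output_stride. Illustrative arithmetic only.
print((513 - 1) // 8 + 1)  # 65 -> matches the (1, 21, 65, 65) logits shape
print(32 // 8)             # 4  -> matches the tester's image_size // output_stride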
336
1
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel _lowerCamelCase : Any = HfApi() _lowerCamelCase : Optional[int] = {} # fmt: off _lowerCamelCase : List[str] = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) _lowerCamelCase : Optional[Any] = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) _lowerCamelCase : int = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) _lowerCamelCase : Dict = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) _lowerCamelCase : Tuple = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) _lowerCamelCase : Dict = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) _lowerCamelCase : List[str] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) _lowerCamelCase : Any = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) _lowerCamelCase : Dict = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 
0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) _lowerCamelCase : Optional[int] = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) _lowerCamelCase : Optional[Any] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) _lowerCamelCase : int = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) _lowerCamelCase : Union[str, Any] = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) _lowerCamelCase : Optional[Any] = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) _lowerCamelCase : List[Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on _lowerCamelCase : Union[str, Any] = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": _lowerCamelCase : Optional[Any] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("CompVis"): _lowerCamelCase : Union[str, Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: _lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) _lowerCamelCase : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) _lowerCamelCase : Any = torch.tensor([1_0] * noise.shape[0]) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :3_0], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
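# The verification loop above depends on seeded, reproducible noise so that the
# first 30 logits can be compared against the stored reference slices. A minimal
# sketch of that pattern (the tensors here stand in for the UNet input/output):
import torch

torch.manual_seed(0)
first = torch.randn(1, 3, 8, 8)
torch.manual_seed(0)
second = torch.randn(1, 3, 8, 8)
assert torch.allclose(first, second, atol=1e-3)  # identical seed -> identical "noise"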
336
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """codegen""" UpperCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ): UpperCAmelCase : int = vocab_size UpperCAmelCase : Tuple = n_ctx UpperCAmelCase : Tuple = n_positions UpperCAmelCase : Optional[int] = n_embd UpperCAmelCase : Union[str, Any] = n_layer UpperCAmelCase : List[str] = n_head UpperCAmelCase : Tuple = n_inner UpperCAmelCase : int = rotary_dim UpperCAmelCase : List[Any] = activation_function UpperCAmelCase : List[str] = resid_pdrop UpperCAmelCase : Optional[Any] = embd_pdrop UpperCAmelCase : str = attn_pdrop UpperCAmelCase : Tuple = layer_norm_epsilon UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Union[str, Any] = use_cache UpperCAmelCase : Any = bos_token_id UpperCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ): super().__init__(__A, task=__A, patching_specs=__A, use_past=__A ) if not getattr(self._config, '''pad_token_id''', __A ): # TODO: how to do that better? 
UpperCAmelCase : Union[str, Any] = 0 @property def __magic_name__ ( self : str ): UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__A, direction='''inputs''' ) UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __magic_name__ ( self : Dict ): return self._config.n_layer @property def __magic_name__ ( self : List[str] ): return self._config.n_head def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs( __A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A ) # We need to order the input in the way they appears in the forward() UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase : str = seqlen + 2 UpperCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase : Optional[int] = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask'''] if self.use_past: UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype UpperCAmelCase : Dict = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 ) return ordered_inputs @property def __magic_name__ ( self : Tuple ): return 1_3
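# Illustration of the dummy past_key_values layout generated above for ONNX
# export: one (key, value) pair of zeros per layer, each shaped
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
# All numbers below are invented; only the shape recipe mirrors the code.
import torch

batch, n_head, past_len, n_embd, n_layer = 2, 4, 7, 32, 3
shape = (batch, n_head, past_len, n_embd // n_head)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
print(len(past_key_values), past_key_values[0][0].shape)  # 3 torch.Size([2, 4, 7, 8])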
336
1
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
336
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
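# Both shim modules above follow the same recipe: re-export the relocated
# symbols, then warn once at import time. A generic sketch of that recipe, with
# warnings.warn standing in for diffusers' deprecate helper (the function and
# message below are assumptions for illustration, not the library API):
import warnings

def _warn_moved(old_path: str, new_path: str, removed_in: str = "0.22.0") -> None:
    warnings.warn(
        f"Importing from {old_path} is deprecated and will be removed in "
        f"{removed_in}; import from {new_path} instead.",
        FutureWarning,
        stacklevel=3,
    )

_warn_moved("diffusers.pipeline_utils", "diffusers.pipelines.pipeline_utils")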
336
1
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-based line number of the base/exponent
    pair in `base_exp.txt` with the greatest value base**exponent. The powers
    are far too large to evaluate directly, so we compare exponent * log10(base)
    instead, which preserves the ordering."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
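# Worked check of the logarithm trick used above: log10 is monotonic, so
# comparing x * log10(a) ranks a**x without ever materializing the huge powers.
from math import log10

assert (3 * log10(2) > 2 * log10(3)) == (2**3 > 3**2)    # both False: 8 < 9
assert (10 * log10(2) > 3 * log10(7)) == (2**10 > 7**3)  # both True: 1024 > 343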
336
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class __UpperCAmelCase : # setable values UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None # sigma(t_i) @classmethod def __magic_name__ ( cls : Any ): return cls() @dataclass class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): @property def __magic_name__ ( self : Optional[int] ): return True @register_to_config def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ): pass def __magic_name__ ( self : Optional[Any] ): return KarrasVeSchedulerState.create() def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ): UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy() UpperCAmelCase : Union[str, Any] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, ) def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ): if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 ) else: UpperCAmelCase : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 ) UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape ) UpperCAmelCase : Tuple = sigma + gamma * sigma UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ): UpperCAmelCase : int = sample_hat + sigma_hat * model_output UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A ) def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ): UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A ) def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ): raise NotImplementedError()
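# Re-evaluation of the geometric schedule assembled in set_timesteps above with
# plain floats (80.0 and 0.05 are the sigma_max/sigma_min defaults from the
# __init__ signature; N is illustrative). The expression interpolates
# geometrically between sigma_max**2 at one end and sigma_min**2 at the other.
sigma_max, sigma_min, N = 80.0, 0.05, 5
vals = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)) for i in range(N)]
print(vals[0], vals[-1])  # 6400.0 (= 80**2) ... ~0.0025 (= 0.05**2)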
336
1
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int]=0.999 , UpperCAmelCase : Dict="cosine" , ) -> Tuple: if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCAmelCase : int ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCAmelCase : Union[str, Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) UpperCAmelCase : List[Any] = [] for i in range(UpperCAmelCase ): UpperCAmelCase : int = i / num_diffusion_timesteps UpperCAmelCase : Union[str, Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCAmelCase ) / alpha_bar_fn(UpperCAmelCase ) , UpperCAmelCase ) ) return torch.tensor(UpperCAmelCase , dtype=torch.floataa ) class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): UpperCamelCase = [e.name for e in KarrasDiffusionSchedulers] UpperCamelCase = 2 @register_to_config def __init__( self : Optional[int], __A : int = 1_0_0_0, __A : float = 0.0_0_0_8_5, __A : float = 0.0_1_2, __A : str = "linear", __A : Optional[Union[np.ndarray, List[float]]] = None, __A : str = "epsilon", __A : str = "linspace", __A : int = 0, ): if trained_betas is not None: UpperCAmelCase : Optional[int] = torch.tensor(__A, dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase : List[str] = torch.linspace(__A, __A, __A, dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCAmelCase : List[Any] = ( torch.linspace(beta_start**0.5, beta_end**0.5, __A, dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase : Dict = betas_for_alpha_bar(__A ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) UpperCAmelCase : str = 1.0 - self.betas UpperCAmelCase : Any = torch.cumprod(self.alphas, dim=0 ) # set all values self.set_timesteps(__A, __A, __A ) def __magic_name__ ( self : Optional[int], __A : str, __A : Optional[int]=None ): if schedule_timesteps is None: UpperCAmelCase : List[str] = self.timesteps UpperCAmelCase : List[str] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase : int = 1 if len(__A ) > 1 else 0 else: UpperCAmelCase : Optional[int] = timestep.cpu().item() if torch.is_tensor(__A ) else timestep UpperCAmelCase : Any = self._index_counter[timestep_int] return indices[pos].item() @property def __magic_name__ ( self : List[str] ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __magic_name__ ( self : Optional[int], __A : torch.FloatTensor, __A : Union[float, torch.FloatTensor], ): UpperCAmelCase : Any = self.index_for_timestep(__A ) if self.state_in_first_order: UpperCAmelCase : List[Any] = self.sigmas[step_index] else: UpperCAmelCase : int = self.sigmas_interpol[step_index] UpperCAmelCase : List[str] = sample / ((sigma**2 + 1) ** 0.5) return sample def __magic_name__ ( self : Dict, __A : int, __A : Union[str, torch.device] = None, __A : Optional[int] = None, ): UpperCAmelCase : Union[str, Any] = num_inference_steps UpperCAmelCase : Optional[int] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase : List[Any] = np.linspace(0, num_train_timesteps - 1, __A, dtype=__A )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase : Dict = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase : Any = (np.arange(0, __A ) * step_ratio).round()[::-1].copy().astype(__A ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase : List[str] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase : List[Any] = (np.arange(__A, 0, -step_ratio )).round().copy().astype(__A ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) UpperCAmelCase : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase : Tuple = torch.from_numpy(np.log(__A ) ).to(__A ) UpperCAmelCase : Tuple = np.interp(__A, np.arange(0, len(__A ) ), __A ) UpperCAmelCase : Dict = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase : List[Any] = torch.from_numpy(__A ).to(device=__A ) # interpolate sigmas UpperCAmelCase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp() UpperCAmelCase : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase : Tuple = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(__A ).startswith('''mps''' ): # mps does not support float64 UpperCAmelCase : Optional[int] = torch.from_numpy(__A ).to(__A, dtype=torch.floataa ) else: UpperCAmelCase : Dict = torch.from_numpy(__A ).to(__A ) # interpolate timesteps UpperCAmelCase : Optional[int] = self.sigma_to_t(__A ).to(__A, dtype=timesteps.dtype ) UpperCAmelCase : List[str] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten() UpperCAmelCase : Any = torch.cat([timesteps[:1], interleaved_timesteps] ) UpperCAmelCase : Any = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase : int = defaultdict(__A ) def __magic_name__ ( self : Optional[int], __A : Optional[Any] ): # get log sigma UpperCAmelCase : int = sigma.log() # get distribution UpperCAmelCase : Dict = log_sigma - self.log_sigmas[:, None] # get sigmas range UpperCAmelCase : Any = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) UpperCAmelCase : List[Any] = low_idx + 1 UpperCAmelCase : Any = self.log_sigmas[low_idx] UpperCAmelCase : Union[str, Any] = self.log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase : Union[str, Any] = (low - log_sigma) / (low - high) UpperCAmelCase : Union[str, Any] = w.clamp(0, 1 ) # transform interpolation to time range UpperCAmelCase : Optional[Any] = (1 - w) * low_idx + w * high_idx UpperCAmelCase : List[Any] = t.view(sigma.shape ) return t @property def __magic_name__ ( self : int ): return self.sample is None def __magic_name__ ( self : Union[str, Any], __A : Union[torch.FloatTensor, np.ndarray], __A : Union[float, torch.FloatTensor], __A : Union[torch.FloatTensor, np.ndarray], __A : bool = True, ): UpperCAmelCase : Tuple = self.index_for_timestep(__A ) # advance index counter by 1 UpperCAmelCase : Tuple = timestep.cpu().item() if torch.is_tensor(__A ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase : str = self.sigmas[step_index] UpperCAmelCase : Optional[int] = self.sigmas_interpol[step_index + 1] UpperCAmelCase : str = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method UpperCAmelCase : Dict = self.sigmas[step_index - 1] UpperCAmelCase : Optional[Any] = self.sigmas_interpol[step_index] UpperCAmelCase : Tuple = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase : Optional[int] = 0 UpperCAmelCase : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase : Dict = sigma_hat if self.state_in_first_order else sigma_interpol UpperCAmelCase : List[str] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol UpperCAmelCase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase : Tuple = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase : Union[str, Any] = sigma_interpol - sigma_hat # store for 2nd order step UpperCAmelCase : Any = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order UpperCAmelCase : Union[str, Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep UpperCAmelCase : Union[str, Any] = sigma_next - sigma_hat UpperCAmelCase : Tuple = self.sample UpperCAmelCase : str = None UpperCAmelCase : List[str] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A ) def __magic_name__ ( self : Any, __A : torch.FloatTensor, __A : torch.FloatTensor, __A : torch.FloatTensor, ): # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCAmelCase : Union[str, Any] = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__A ): # mps does not support float64 UpperCAmelCase : Union[str, Any] = self.timesteps.to(original_samples.device, dtype=torch.floataa ) UpperCAmelCase : Any = timesteps.to(original_samples.device, dtype=torch.floataa ) else: UpperCAmelCase : Any = self.timesteps.to(original_samples.device ) UpperCAmelCase : Optional[Any] = timesteps.to(original_samples.device ) UpperCAmelCase : Dict = [self.index_for_timestep(__A, __A ) for t in timesteps] UpperCAmelCase : Optional[int] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase : Optional[int] = sigma.unsqueeze(-1 ) UpperCAmelCase : Tuple = original_samples + noise * sigma return noisy_samples def __len__( self : List[str] ): return self.config.num_train_timesteps
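# The interpolated sigmas in set_timesteps above come from a 0.5 lerp in log space,
# i.e. the geometric mean of neighbouring sigmas. A minimal NumPy sketch of that
# one step, with illustrative values (note the first element wraps around to the last):
import numpy as np

sigmas = np.array([10.0, 5.0, 1.0, 0.1])
sigmas_interpol = np.exp(0.5 * (np.log(sigmas) + np.log(np.roll(sigmas, 1))))
print(sigmas_interpol)  # element i is sqrt(sigma_i * sigma_{i-1})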
336
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Hide the terminal cursor for the duration of the block, restoring it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
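# Usage sketch for the context manager above (the function names are reconstructions,
# since the originals were mangled): the cursor disappears while the block runs and
# is restored even if the body raises.
import time

with hide():
    time.sleep(2)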
336
1
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """codegen""" UpperCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ): UpperCAmelCase : int = vocab_size UpperCAmelCase : Tuple = n_ctx UpperCAmelCase : Tuple = n_positions UpperCAmelCase : Optional[int] = n_embd UpperCAmelCase : Union[str, Any] = n_layer UpperCAmelCase : List[str] = n_head UpperCAmelCase : Tuple = n_inner UpperCAmelCase : int = rotary_dim UpperCAmelCase : List[Any] = activation_function UpperCAmelCase : List[str] = resid_pdrop UpperCAmelCase : Optional[Any] = embd_pdrop UpperCAmelCase : str = attn_pdrop UpperCAmelCase : Tuple = layer_norm_epsilon UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Union[str, Any] = use_cache UpperCAmelCase : Any = bos_token_id UpperCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ): super().__init__(__A, task=__A, patching_specs=__A, use_past=__A ) if not getattr(self._config, '''pad_token_id''', __A ): # TODO: how to do that better? 
UpperCAmelCase : Union[str, Any] = 0 @property def __magic_name__ ( self : str ): UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__A, direction='''inputs''' ) UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __magic_name__ ( self : Dict ): return self._config.n_layer @property def __magic_name__ ( self : List[str] ): return self._config.n_head def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs( __A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A ) # We need to order the input in the way they appears in the forward() UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase : str = seqlen + 2 UpperCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase : Optional[int] = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask'''] if self.use_past: UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype UpperCAmelCase : Dict = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 ) return ordered_inputs @property def __magic_name__ ( self : Tuple ): return 1_3
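# The dummy past_key_values built in generate_dummy_inputs above are zero tensors of
# shape (batch, num_heads, seqlen + 2, hidden_size // num_heads). Illustrative numbers,
# using the n_head=16 / n_embd=4096 defaults from the config signature:
batch, num_heads, seqlen, hidden_size = 2, 16, 7, 4096
past_shape = (batch, num_heads, seqlen + 2, hidden_size // num_heads)
print(past_shape)  # (2, 16, 9, 256)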
336
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
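# A minimal illustrative sketch of the lazy-import idea this __init__ relies on --
# this is NOT transformers' actual _LazyModule, just the pattern: attributes are
# resolved on first access instead of at package import time.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value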
336
1
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a message with this Baconian-cipher variant (5-letter A/B groups)."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
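# Round-trip example: each letter maps to a fixed 5-character A/B group, so decoding
# just consumes the string five characters at a time.
print(encode("hello"))                         # AABBBAABAAABABAABABAABBAB
print(decode("AABBBAABAAABABAABABAABBAB"))     # hello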
336
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide a byte range into `partitions` contiguous allocations,
    e.g. for a multi-connection download."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
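# Worked example: 16647 // 4 = 4161 bytes per partition; the last partition absorbs
# the remainder so the ranges cover every byte exactly once.
print(allocation_num(16_647, 4))
# ['1-4161', '4162-8322', '8323-12483', '12484-16647']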
336
1
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(
    x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
    """Project a 3D point onto 2D using a simple perspective divide."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(
    x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
    """Rotate a 3D point about the given axis (see the unit quirk noted below)."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # quirk kept from the original: the angle is normalized with 450, not 360
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
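# rotate() above is the standard 2D rotation applied in the plane orthogonal to `axis`:
#
#   [new_x]   [cos(theta)  -sin(theta)] [x]
#   [new_y] = [sin(theta)   cos(theta)] [y]          (for axis == "z")
#
# and convert_to_2d() is the perspective projection
#
#   x_2d = (x * d / (z + d)) * s,   y_2d = (y * d / (z + d)) * s
#
# Note that because of the (angle % 360) / 450 normalization, `angle` is not in
# plain degrees or radians.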
336
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file _lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`." def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]: if subparsers is not None: UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description ) else: UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description ) # Core arguments UpperCAmelCase : Optional[int] = parser.add_argument_group( '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' ) config_args.add_argument( '''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , ) config_args.add_argument( '''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , ) config_args.add_argument( '''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , ) UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' ) pod_args.add_argument( '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , ) pod_args.add_argument( '''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , ) pod_args.add_argument( '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , ) pod_args.add_argument( '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , ) pod_args.add_argument( '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , ) pod_args.add_argument( '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' ) if subparsers is not None: parser.set_defaults(func=UpperCAmelCase ) return parser def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(UpperCAmelCase ): UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: UpperCAmelCase : List[Any] = defaults.command_file if not args.command and defaults.commands is not None: UpperCAmelCase : List[str] = defaults.commands if not args.tpu_name: UpperCAmelCase : Tuple = defaults.tpu_name if not args.tpu_zone: UpperCAmelCase : int = defaults.tpu_zone if args.accelerate_version == "dev": UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git''' elif args.accelerate_version == "latest": UpperCAmelCase : Dict = '''accelerate -U''' elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ): UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}''' if not args.command_file and not args.command: raise ValueError('''You must specify either a command file or a command to run on the pod.''' ) if args.command_file: with open(args.command_file , '''r''' ) as f: UpperCAmelCase : int = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , UpperCAmelCase ): UpperCAmelCase : int = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate UpperCAmelCase : Optional[int] = ['''cd /usr/share'''] if args.install_accelerate: new_cmd += [f'''pip install {args.accelerate_version}'''] new_cmd += args.command UpperCAmelCase : int = '''; '''.join(UpperCAmelCase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess UpperCAmelCase : Any = ['''gcloud'''] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f'''Running {" ".join(UpperCAmelCase )}''' ) return subprocess.run(UpperCAmelCase ) print('''Successfully setup pod.''' ) def a__ ( ) -> Any: UpperCAmelCase : Any = tpu_command_parser() UpperCAmelCase : Tuple = parser.parse_args() tpu_command_launcher(UpperCAmelCase )
336
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
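# Usage sketch (assumes transformers is installed, since the relative imports above
# only resolve inside the package): attribute_map aliases the generic names onto the
# decoder-specific ones, so hidden_size resolves to d_model.
config = TrOCRConfig()
assert config.hidden_size == config.d_model == 1024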
336
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[int] = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: print('''Loading config file...''' ) def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ): UpperCAmelCase : List[str] = [] for k, v in d.items(): UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k if isinstance(UpperCAmelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() ) else: items.append((new_key, v) ) return dict(UpperCAmelCase ) UpperCAmelCase : List[str] = argparse.Namespace() with open(UpperCAmelCase , '''r''' ) as yaml_file: try: UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader ) UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase ) for k, v in flat_cfg.items(): setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) ) return config def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]: UpperCAmelCase : int = MobileViTVaConfig() UpperCAmelCase : str = False # dataset if task_name.startswith('''imagenet1k_''' ): UpperCAmelCase : Any = 1_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase : Any = 384 else: UpperCAmelCase : Tuple = 256 UpperCAmelCase : int = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): UpperCAmelCase : Optional[Any] = 21_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase : str = 384 else: UpperCAmelCase : Dict = 256 UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): UpperCAmelCase : Optional[Any] = 151 UpperCAmelCase : Tuple = 512 UpperCAmelCase : Tuple = '''ade20k-id2label.json''' UpperCAmelCase : Tuple = True elif task_name.startswith('''voc_''' ): UpperCAmelCase : Dict = 21 UpperCAmelCase : str = 512 UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json''' UpperCAmelCase : Dict = True # orig_config UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase ) assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) UpperCAmelCase : Any = getattr(UpperCAmelCase , 
'''model.segmentation.deeplabv3.aspp_out_channels''' , 512 ) UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label UpperCAmelCase : Union[str, Any] = '''huggingface/label-files''' UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : int = idalabel UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} return config def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]: UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase ) UpperCAmelCase : List[str] = val def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]: if base_model: UpperCAmelCase : Dict = '''''' else: UpperCAmelCase : Dict = '''mobilevitv2.''' UpperCAmelCase : Optional[int] = [] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCAmelCase : List[str] = k[8:] else: UpperCAmelCase : Dict = k if ".block." in k: UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: UpperCAmelCase : Dict = [0, 1] elif i == 4: UpperCAmelCase : Dict = [0, 1, 2, 3] elif i == 5: UpperCAmelCase : int = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: UpperCAmelCase : Optional[Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: UpperCAmelCase : Any = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." 
in k: UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." in k: UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any: UpperCAmelCase : str = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(UpperCAmelCase ) for k in keys_to_ignore: state_dict.pop(UpperCAmelCase , UpperCAmelCase ) def a__ ( ) -> Union[str, Any]: UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase ) # load original state_dict UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval() UpperCAmelCase : str = False else: UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval() UpperCAmelCase : Any = False # remove and rename some keys of load the original model UpperCAmelCase : Optional[Any] = checkpoint remove_unused_keys(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # load modified state_dict model.load_state_dict(UpperCAmelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase ) # verify classification model if task_name.startswith('''imagenet''' ): UpperCAmelCase : Optional[Any] = outputs.logits UpperCAmelCase : int = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": _lowerCamelCase : str = 
argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
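# The nested flatten_yaml_as_dict helper near the top of this conversion script
# collapses a YAML tree into dotted keys so getattr(config, "model.classification.name")
# works. A standalone reconstruction of the same pattern:
from collections.abc import MutableMapping


def flatten(d, parent_key="", sep="."):
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


print(flatten({"model": {"classification": {"name": "mobilevit_v2"}}}))
# {'model.classification.name': 'mobilevit_v2'}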
336
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType _lowerCamelCase : Dict = logging.get_logger(__name__) _lowerCamelCase : int = { "openai/imagegpt-small": "", "openai/imagegpt-medium": "", "openai/imagegpt-large": "", } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """imagegpt""" UpperCamelCase = ["""past_key_values"""] UpperCamelCase = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : List[Any], __A : int=5_1_2 + 1, __A : Optional[int]=3_2 * 3_2, __A : str=5_1_2, __A : int=2_4, __A : Any=8, __A : List[str]=None, __A : str="quick_gelu", __A : str=0.1, __A : int=0.1, __A : List[str]=0.1, __A : Tuple=1E-5, __A : str=0.0_2, __A : str=True, __A : Union[str, Any]=True, __A : Union[str, Any]=False, __A : Any=False, __A : int=False, **__A : int, ): UpperCAmelCase : Any = vocab_size UpperCAmelCase : Optional[int] = n_positions UpperCAmelCase : Union[str, Any] = n_embd UpperCAmelCase : List[Any] = n_layer UpperCAmelCase : str = n_head UpperCAmelCase : str = n_inner UpperCAmelCase : str = activation_function UpperCAmelCase : Optional[Any] = resid_pdrop UpperCAmelCase : str = embd_pdrop UpperCAmelCase : List[str] = attn_pdrop UpperCAmelCase : List[str] = layer_norm_epsilon UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : Dict = scale_attn_weights UpperCAmelCase : List[str] = use_cache UpperCAmelCase : Any = scale_attn_by_inverse_layer_idx UpperCAmelCase : Union[str, Any] = reorder_and_upcast_attn UpperCAmelCase : Optional[int] = tie_word_embeddings super().__init__(tie_word_embeddings=__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): @property def __magic_name__ ( self : int ): return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def __magic_name__ ( self : str, __A : "FeatureExtractionMixin", __A : int = 1, __A : int = -1, __A : bool = False, __A : Optional["TensorType"] = None, __A : int = 3, __A : int = 3_2, __A : int = 3_2, ): UpperCAmelCase : int = self._generate_dummy_images(__A, __A, __A, __A ) UpperCAmelCase : Dict = dict(preprocessor(images=__A, return_tensors=__A ) ) return inputs
336
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __UpperCAmelCase ( lowerCamelCase__ ): def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) UpperCAmelCase : str = '''__cached_''' + self.fget.__name__ UpperCAmelCase : int = getattr(__A, __A, __A ) if cached is None: UpperCAmelCase : Any = self.fget(__A ) setattr(__A, __A, __A ) return cached def a__ ( UpperCAmelCase : Optional[Any] ) -> Any: UpperCAmelCase : Any = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_torch_fx_proxy(UpperCAmelCase ): return True if is_torch_available(): import torch if isinstance(UpperCAmelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCAmelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]: return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : str ) -> Tuple: return _is_numpy(UpperCAmelCase ) def a__ ( UpperCAmelCase : str ) -> List[Any]: import torch return isinstance(UpperCAmelCase , torch.Tensor ) def a__ ( UpperCAmelCase : str ) -> List[Any]: return False if not is_torch_available() else _is_torch(UpperCAmelCase ) def a__ ( UpperCAmelCase : Tuple ) -> List[str]: import torch return isinstance(UpperCAmelCase , torch.device ) def a__ ( UpperCAmelCase : Any ) -> Any: return False if not is_torch_available() else _is_torch_device(UpperCAmelCase ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: import torch if isinstance(UpperCAmelCase , UpperCAmelCase ): if hasattr(UpperCAmelCase , UpperCAmelCase ): UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase ) else: return False return isinstance(UpperCAmelCase , torch.dtype ) def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase ) def a__ ( UpperCAmelCase : Any ) -> str: import tensorflow as tf return isinstance(UpperCAmelCase , tf.Tensor ) def a__ ( UpperCAmelCase : int ) -> Union[str, Any]: return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[str] ) -> Tuple: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(UpperCAmelCase ) return type(UpperCAmelCase ) == tf.Tensor def a__ ( UpperCAmelCase : int ) -> List[Any]: return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[Any] ) -> Dict: import jax.numpy as jnp # noqa: F811 return isinstance(UpperCAmelCase , jnp.ndarray ) 
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]: return False if not is_flax_available() else _is_jax(UpperCAmelCase ) def a__ ( UpperCAmelCase : int ) -> Tuple: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return [to_py_obj(UpperCAmelCase ) for o in obj] elif is_tf_tensor(UpperCAmelCase ): return obj.numpy().tolist() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ).tolist() elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( UpperCAmelCase : Any ) -> List[str]: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return np.array(UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): return obj.numpy() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ) else: return obj class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Optional[Any] = fields(self ) # Safety and consistency checks if not len(__A ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) UpperCAmelCase : int = getattr(self, class_fields[0].name ) UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__A ): if isinstance(__A, __A ): UpperCAmelCase : Tuple = first_field.items() UpperCAmelCase : Any = True else: try: UpperCAmelCase : Optional[Any] = iter(__A ) UpperCAmelCase : Optional[Any] = True except TypeError: UpperCAmelCase : Optional[int] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__A ): if ( not isinstance(__A, (list, tuple) ) or not len(__A ) == 2 or not isinstance(element[0], __A ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' )
                    break
                setattr(self, element[0], element[1])
                if element[1] is not None:
                    self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
    else:
        for field in class_fields:
            v = getattr(self, field.name)
            if v is not None:
                self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''')

    def setdefault(self, *args, **kwargs):
        raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''')

    def pop(self, *args, **kwargs):
        raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''')

    def update(self, *args, **kwargs):
        raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''')

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        # Convert self to a tuple of all attributes/keys that are not None.
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}'''
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''


class TensorType(ExplicitEnum):
    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''


class ContextManagers:
    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    # Flatten a nested dict into a single-level dict, joining keys with `delimiter`.
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array)}.''')


def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array)}.''')


def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array)}.''')


def expand_dims(array, axis):
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array)}.''')


def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f'''Type not supported for tensor_size: {type(array)}.''')


def add_model_info_to_auto_map(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''

    return auto_map


def infer_framework(model_class):
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''') or module.startswith('''keras''') or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''') or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''') or module.startswith('''jax''') or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'''Could not infer framework from class {model_class}.''')
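A quick sanity check of the dict-flattening helper reconstructed above (a minimal sketch; `nested` is a made-up example dict, and only `flatten_dict` from this file is assumed):

nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
print(flatten_dict(nested))                 # {'a': 1, 'b.c': 2, 'b.d.e': 3}
print(flatten_dict(nested, delimiter="/"))  # {'a': 1, 'b/c': 2, 'b/d/e': 3}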
336
1
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
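A short illustration of the row-wise binary search above; the expected values are checked by hand against the first test grid, and nothing beyond the functions defined in this file is assumed:

row = [4, 3, 2, -1]
assert find_negative_index(row) == 3  # the first negative value sits at index 3
assert count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1]]) == 2  # two negatives total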
336
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # Left unimplemented upstream; kept as a placeholder test.
        pass
336
1
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    if not sentence:
        return ""

    # Map each lowercase letter onto its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
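Example calls for the helper above (a minimal sketch using only the `capitalize` function from this file):

print(capitalize("hello world"))  # Hello world
print(capitalize("123 hello"))    # 123 hello -- a non-letter first character passes through unchanged
print(capitalize(""))             # empty string stays empty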
336
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Dict = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : Dict = patch_size UpperCAmelCase : int = num_channels UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : Union[str, Any] = use_labels UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Tuple = mask_ratio UpperCAmelCase : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCAmelCase : Tuple = (image_size // patch_size) ** 2 UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : str = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Optional[Any] ): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ): UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A ) UpperCAmelCase : Tuple = model(__A, training=__A ) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ): UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A ) UpperCAmelCase : int = model(__A, training=__A ) # expected sequence length = num_patches UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2 UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCAmelCase : Tuple = 1 UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A ) UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = model(__A, training=__A ) UpperCAmelCase : Union[str, Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Dict = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = TFViTMAEModelTester(self ) UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[str] = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) ) UpperCAmelCase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) ) def __magic_name__ ( self : str ): UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(__A ) UpperCAmelCase : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : int = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__A ) def __magic_name__ ( self : int ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) 
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : str = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : Dict = model(__A, noise=__A ) UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A ) UpperCAmelCase : Dict = outputs_dict[0].numpy() UpperCAmelCase : Tuple = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 ) def __magic_name__ ( self : Optional[Any] ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__A : Union[str, Any] ): UpperCAmelCase : str = {} for k, v in inputs_dict.items(): if tf.is_tensor(__A ): UpperCAmelCase : Tuple = v.numpy() else: UpperCAmelCase : str = np.array(__A ) return inputs_np_dict for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : Any = self._prepare_for_class(__A, __A ) UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A ) UpperCAmelCase : str = model(__A, noise=__A ) UpperCAmelCase : str = model(**__A, noise=__A ) self.assert_outputs_same(__A, __A ) def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ): # make masks reproducible np.random.seed(2 ) UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : int = tf.constant(__A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCAmelCase : List[Any] = tf_noise super().check_pt_tf_models(__A, __A, __A ) def __magic_name__ ( self : str ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Union[str, Any] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__A ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(__A, __A ),) if isinstance(__A, __A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__A, '''_keras_serializable''', __A ) } UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : str = tf.convert_to_tensor(__A ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: UpperCAmelCase : Tuple = main_layer_class(__A ) UpperCAmelCase : int = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) ) UpperCAmelCase : List[Any] = model(__A ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' ) model.save(__A ) UpperCAmelCase : List[str] = tf.keras.models.load_model( __A, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__A, tf.keras.Model ) UpperCAmelCase : Tuple = model(__A ) self.assert_outputs_same(__A, __A ) @slow def __magic_name__ ( self : Dict ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(__A ) UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A ) UpperCAmelCase : Union[str, Any] = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy() UpperCAmelCase : Union[str, Any] = 0 else: UpperCAmelCase : Optional[int] = outputs.logits.numpy() UpperCAmelCase : int = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A, saved_model=__A ) UpperCAmelCase : Dict = model_class.from_pretrained(__A ) UpperCAmelCase : str = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy() UpperCAmelCase : Dict = 0 else: UpperCAmelCase : Any = after_outputs['''logits'''].numpy() UpperCAmelCase : Dict = 0 UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__A, 1E-5 ) def __magic_name__ ( self : Optional[Any] ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : List[Any] = model(__A, noise=__A ) UpperCAmelCase : str = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(__A ) UpperCAmelCase : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCAmelCase : str = model_class.from_config(model.config ) UpperCAmelCase : List[str] = new_model(__A ) # Build model 
new_model.set_weights(model.get_weights() ) UpperCAmelCase : Tuple = new_model(__A, noise=__A ) self.assert_outputs_same(__A, __A ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def __magic_name__ ( self : Tuple ): pass @slow def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__A ) def a__ ( ) -> Dict: UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[str] ): return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def __magic_name__ ( self : str ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) UpperCAmelCase : List[str] = self.default_image_processor UpperCAmelCase : Any = prepare_img() UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCAmelCase : Optional[int] = ViTMAEConfig() UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCAmelCase : Optional[int] = model(**__A, noise=__A ) # verify the logits UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : List[str] = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
336
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = ShapEPipeline UpperCamelCase = ["""prompt"""] UpperCamelCase = ["""prompt"""] UpperCamelCase = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] UpperCamelCase = False @property def __magic_name__ ( self : str ): return 3_2 @property def __magic_name__ ( self : List[str] ): return 3_2 @property def __magic_name__ ( self : Any ): return self.time_input_dim * 4 @property def __magic_name__ ( self : List[str] ): return 8 @property def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def __magic_name__ ( self : Union[str, Any] ): torch.manual_seed(0 ) UpperCAmelCase : Any = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, ) return CLIPTextModelWithProjection(__A ) @property def __magic_name__ ( self : List[Any] ): torch.manual_seed(0 ) UpperCAmelCase : Tuple = { '''num_attention_heads''': 2, '''attention_head_dim''': 1_6, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 3_2, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } UpperCAmelCase : List[Any] = PriorTransformer(**__A ) return model @property def __magic_name__ ( self : List[Any] ): torch.manual_seed(0 ) UpperCAmelCase : int = { '''param_shapes''': ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 1_2, '''background''': ( 0.1, 0.1, 0.1, ), } UpperCAmelCase : Any = ShapERenderer(**__A ) return model def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Optional[Any] = self.dummy_prior UpperCAmelCase : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase : Optional[int] = self.dummy_tokenizer UpperCAmelCase : Dict = self.dummy_renderer UpperCAmelCase : str = HeunDiscreteScheduler( beta_schedule='''exp''', num_train_timesteps=1_0_2_4, prediction_type='''sample''', use_karras_sigmas=__A, clip_sample=__A, clip_sample_range=1.0, ) UpperCAmelCase : Dict = { '''prior''': prior, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __magic_name__ ( self : Tuple, __A : Dict, __A : Optional[int]=0 ): if str(__A ).startswith('''mps''' ): UpperCAmelCase : Tuple = torch.manual_seed(__A ) else: UpperCAmelCase : List[Any] = 
torch.Generator(device=__A ).manual_seed(__A ) UpperCAmelCase : List[Any] = { '''prompt''': '''horse''', '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 3_2, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Any = '''cpu''' UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : Union[str, Any] = self.pipeline_class(**__A ) UpperCAmelCase : List[Any] = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase : Dict = pipe(**self.get_dummy_inputs(__A ) ) UpperCAmelCase : int = output.images[0] UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) UpperCAmelCase : int = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __magic_name__ ( self : Union[str, Any] ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : str = torch_device == '''cpu''' UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=__A, relax_max_difference=__A, ) def __magic_name__ ( self : Dict ): UpperCAmelCase : List[Any] = self.get_dummy_components() UpperCAmelCase : str = self.pipeline_class(**__A ) UpperCAmelCase : Tuple = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = 2 UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(__A ) for key in inputs.keys(): if key in self.batch_params: UpperCAmelCase : List[str] = batch_size * [inputs[key]] UpperCAmelCase : Tuple = pipe(**__A, num_images_per_prompt=__A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __UpperCAmelCase ( unittest.TestCase ): def __magic_name__ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Tuple ): UpperCAmelCase : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_np_out.npy''' ) UpperCAmelCase : Dict = ShapEPipeline.from_pretrained('''openai/shap-e''' ) UpperCAmelCase : Any = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase : Union[str, Any] = torch.Generator(device=__A ).manual_seed(0 ) UpperCAmelCase : Optional[int] = pipe( '''a shark''', generator=__A, guidance_scale=1_5.0, num_inference_steps=6_4, frame_size=6_4, output_type='''np''', ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(__A, __A )
336
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
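The memo table appears to count partitions of n restricted by a maximum part size, so partition(m) returns the classic partition number p(m). Two hand-checked values (they match the known sequence p(3) = 3, p(5) = 7):

assert partition(3) == 3  # 3 = 2+1 = 1+1+1
assert partition(5) == 7  # p(5) = 7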
336
1
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = (PNDMScheduler,) UpperCamelCase = (("""num_inference_steps""", 5_0),) def __magic_name__ ( self : List[Any], **__A : List[str] ): UpperCAmelCase : List[Any] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**__A ) return config def __magic_name__ ( self : Dict, __A : List[str]=0, **__A : Union[str, Any] ): UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase : Tuple = kwargs.pop('''num_inference_steps''', __A ) UpperCAmelCase : str = self.dummy_sample UpperCAmelCase : List[Any] = 0.1 * sample UpperCAmelCase : int = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: UpperCAmelCase : List[str] = self.get_scheduler_config(**__A ) UpperCAmelCase : str = scheduler_class(**__A ) scheduler.set_timesteps(__A ) # copy over dummy past residuals UpperCAmelCase : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__A ) UpperCAmelCase : int = scheduler_class.from_pretrained(__A ) new_scheduler.set_timesteps(__A ) # copy over dummy past residuals UpperCAmelCase : int = dummy_past_residuals[:] UpperCAmelCase : Dict = scheduler.step_prk(__A, __A, __A, **__A ).prev_sample UpperCAmelCase : Any = new_scheduler.step_prk(__A, __A, __A, **__A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase : Optional[Any] = scheduler.step_plms(__A, __A, __A, **__A ).prev_sample UpperCAmelCase : int = new_scheduler.step_plms(__A, __A, __A, **__A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __magic_name__ ( self : Optional[int] ): pass def __magic_name__ ( self : List[str], __A : Any=0, **__A : List[str] ): UpperCAmelCase : int = dict(self.forward_default_kwargs ) UpperCAmelCase : Optional[int] = kwargs.pop('''num_inference_steps''', __A ) UpperCAmelCase : Union[str, Any] = self.dummy_sample UpperCAmelCase : Dict = 0.1 * sample UpperCAmelCase : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: UpperCAmelCase : List[Any] = self.get_scheduler_config() UpperCAmelCase : Union[str, Any] = scheduler_class(**__A ) scheduler.set_timesteps(__A ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__A ) UpperCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(__A ) # copy over dummy past residuals new_scheduler.set_timesteps(__A ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase : str = dummy_past_residuals[:] UpperCAmelCase : int = scheduler.step_prk(__A, __A, __A, **__A ).prev_sample UpperCAmelCase : List[str] = new_scheduler.step_prk(__A, __A, __A, **__A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase : Any = scheduler.step_plms(__A, __A, __A, **__A ).prev_sample UpperCAmelCase : Tuple = new_scheduler.step_plms(__A, __A, __A, **__A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def 
__magic_name__ ( self : Optional[int], **__A : List[Any] ): UpperCAmelCase : Optional[int] = self.scheduler_classes[0] UpperCAmelCase : List[Any] = self.get_scheduler_config(**__A ) UpperCAmelCase : int = scheduler_class(**__A ) UpperCAmelCase : Optional[Any] = 1_0 UpperCAmelCase : Optional[int] = self.dummy_model() UpperCAmelCase : Dict = self.dummy_sample_deter scheduler.set_timesteps(__A ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase : str = model(__A, __A ) UpperCAmelCase : List[str] = scheduler.step_prk(__A, __A, __A ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase : List[str] = model(__A, __A ) UpperCAmelCase : Dict = scheduler.step_plms(__A, __A, __A ).prev_sample return sample def __magic_name__ ( self : Any ): UpperCAmelCase : Any = dict(self.forward_default_kwargs ) UpperCAmelCase : List[Any] = kwargs.pop('''num_inference_steps''', __A ) for scheduler_class in self.scheduler_classes: UpperCAmelCase : Tuple = self.get_scheduler_config() UpperCAmelCase : Optional[int] = scheduler_class(**__A ) UpperCAmelCase : Union[str, Any] = self.dummy_sample UpperCAmelCase : Any = 0.1 * sample if num_inference_steps is not None and hasattr(__A, '''set_timesteps''' ): scheduler.set_timesteps(__A ) elif num_inference_steps is not None and not hasattr(__A, '''set_timesteps''' ): UpperCAmelCase : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] UpperCAmelCase : Tuple = dummy_past_residuals[:] UpperCAmelCase : Union[str, Any] = scheduler.step_prk(__A, 0, __A, **__A ).prev_sample UpperCAmelCase : Any = scheduler.step_prk(__A, 1, __A, **__A ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) UpperCAmelCase : List[Any] = scheduler.step_plms(__A, 0, __A, **__A ).prev_sample UpperCAmelCase : str = scheduler.step_plms(__A, 1, __A, **__A ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def __magic_name__ ( self : Any ): for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__A ) def __magic_name__ ( self : Optional[Any] ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__A ) UpperCAmelCase : Optional[int] = self.scheduler_classes[0] UpperCAmelCase : Tuple = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase : Optional[Any] = scheduler_class(**__A ) scheduler.set_timesteps(1_0 ) assert torch.equal( scheduler.timesteps, torch.LongTensor( [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ), ) def __magic_name__ ( self : Dict ): for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1], [0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=__A, beta_end=__A ) def __magic_name__ ( self : int ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__A ) def __magic_name__ ( self : str ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__A ) def __magic_name__ ( self : Dict ): for t in [1, 5, 1_0]: self.check_over_forward(time_step=__A ) def __magic_name__ ( self : Tuple ): for t, num_inference_steps in zip([1, 5, 1_0], [1_0, 5_0, 1_0_0] ): self.check_over_forward(num_inference_steps=__A ) def __magic_name__ ( self : Optional[int] ): # earlier version of set_timesteps() caused an error indexing alpha's 
with inference steps as power of 3 UpperCAmelCase : List[Any] = 2_7 for scheduler_class in self.scheduler_classes: UpperCAmelCase : Union[str, Any] = self.dummy_sample UpperCAmelCase : Optional[Any] = 0.1 * sample UpperCAmelCase : List[str] = self.get_scheduler_config() UpperCAmelCase : Union[str, Any] = scheduler_class(**__A ) scheduler.set_timesteps(__A ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase : Optional[Any] = scheduler.step_prk(__A, __A, __A ).prev_sample def __magic_name__ ( self : Optional[int] ): with self.assertRaises(__A ): UpperCAmelCase : List[Any] = self.scheduler_classes[0] UpperCAmelCase : List[str] = self.get_scheduler_config() UpperCAmelCase : Union[str, Any] = scheduler_class(**__A ) scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample ).prev_sample def __magic_name__ ( self : int ): UpperCAmelCase : int = self.full_loop() UpperCAmelCase : Tuple = torch.sum(torch.abs(__A ) ) UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(__A ) ) assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_5_8_0 ) < 1E-3 def __magic_name__ ( self : Tuple ): UpperCAmelCase : Dict = self.full_loop(prediction_type='''v_prediction''' ) UpperCAmelCase : List[Any] = torch.sum(torch.abs(__A ) ) UpperCAmelCase : List[str] = torch.mean(torch.abs(__A ) ) assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1E-2 assert abs(result_mean.item() - 0.0_8_7_8 ) < 1E-3 def __magic_name__ ( self : int ): # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase : Optional[Any] = self.full_loop(set_alpha_to_one=__A, beta_start=0.0_1 ) UpperCAmelCase : List[str] = torch.sum(torch.abs(__A ) ) UpperCAmelCase : str = torch.mean(torch.abs(__A ) ) assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1E-2 assert abs(result_mean.item() - 0.2_9_9_5 ) < 1E-3 def __magic_name__ ( self : List[Any] ): # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase : Any = self.full_loop(set_alpha_to_one=__A, beta_start=0.0_1 ) UpperCAmelCase : Dict = torch.sum(torch.abs(__A ) ) UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(__A ) ) assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_4_3_4 ) < 1E-3
336
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
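A tiny worked input for the backtracking solver above (a sketch: 0 marks open cells, 1 marks walls, and the rat travels from the top-left to the bottom-right corner):

maze = [
    [0, 1, 0],
    [0, 1, 0],
    [0, 0, 0],
]
solve_maze(maze)  # prints the 0/1 path matrix and returns True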
336
1
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
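How the lazy module behaves in practice (a hedged sketch: it assumes this file is installed as transformers/onnx/__init__.py, which is where this _import_structure lives upstream):

import transformers.onnx                 # cheap -- no submodule is actually imported yet
cfg_cls = transformers.onnx.OnnxConfig   # first attribute access triggers the real import of .config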
336
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __UpperCAmelCase : def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Any = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : List[str] = patch_size UpperCAmelCase : Dict = num_channels UpperCAmelCase : List[Any] = is_training UpperCAmelCase : Dict = use_labels UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : Union[str, Any] = num_hidden_layers UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Any = hidden_act UpperCAmelCase : Any = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : str = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : int = scope UpperCAmelCase : List[str] = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size UpperCAmelCase : str = (self.image_size // 3_2) ** 2 UpperCAmelCase : List[str] = num_patches + 1 def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : str = None if self.use_labels: UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Any ): UpperCAmelCase : Dict = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 1_6, 3_2], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, 
backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, ) def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ): UpperCAmelCase : int = ViTHybridModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ): UpperCAmelCase : str = self.type_sequence_label_size UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self : int ): UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs UpperCAmelCase : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () UpperCamelCase = ( {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Any = ViTHybridModelTester(self ) UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : int ): UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) UpperCAmelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, nn.Linear ) ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = model_class(__A ) UpperCAmelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : str = [*signature.parameters.keys()] UpperCAmelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Dict = _config_zero_init(__A ) for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(config=__A ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == 
"ViTHybridPatchEmbeddings": UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @slow def __magic_name__ ( self : List[str] ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> Tuple: UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : str ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __A ) UpperCAmelCase : Tuple = self.default_image_processor UpperCAmelCase : int = prepare_img() UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(**__A ) # verify the logits UpperCAmelCase : str = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow @require_accelerate def __magic_name__ ( self : Dict ): UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' ) UpperCAmelCase : Tuple = prepare_img() UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' ) UpperCAmelCase : Dict = model(**__A ) UpperCAmelCase : Any = outputs.logits # model predicts one of the 1000 ImageNet classes UpperCAmelCase : Dict = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
336
1
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
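A worked example for the helper above, checked by hand (25 = 0b11001 and 32 = 0b100000, so the bitwise OR is 0b111001):

print(binary_or(25, 32))  # 0b111001
print(binary_or(0, 0))    # 0b0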
336
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: try every ordered triple of distinct elements.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Sort once, then sweep with two pointers for each fixed first element.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
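The sorted two-pointer version trades the O(n^3) permutation scan for an O(n^2) sweep. A hand-checked call, using only functions from this file:

assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)  # same answer, much slower on large inputs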
336
1
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
336
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    def put(self, value):
        # Called by `.generate()` to push new tokens.
        raise NotImplementedError()

    def end(self):
        # Called by `.generate()` to signal the end of generation.
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
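Typical wiring for the iterator variant above (a hedged sketch: `model`, `tokenizer`, and `inputs` are assumed to be an already-loaded generative model, its tokenizer, and tokenized inputs; `generate` runs on a worker thread while this thread consumes text):

from threading import Thread

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 64})
thread.start()
for text_chunk in streamer:
    print(text_chunk, end="")
thread.join()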
336
1
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __UpperCAmelCase ( unittest.TestCase ): def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ): UpperCAmelCase : Any = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : Any = seq_length UpperCAmelCase : Tuple = is_training UpperCAmelCase : str = use_attention_mask UpperCAmelCase : List[str] = use_token_type_ids UpperCAmelCase : int = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : str = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Optional[Any] = initializer_range UpperCAmelCase : Any = num_choices def __magic_name__ ( self : str ): UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : List[Any] = None if self.use_attention_mask: UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Any = None if self.use_token_type_ids: UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) UpperCAmelCase : Union[str, Any] = RobertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def __magic_name__ ( self : int ): UpperCAmelCase : Any = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs UpperCAmelCase : Any = True UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 
self.seq_length, self.hidden_size] ) UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Dict = FlaxRobertaModelTester(self ) @slow def __magic_name__ ( self : Any ): for model_class_name in self.all_model_classes: UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A ) UpperCAmelCase : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
336
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    # Difference between the hypothesis and the expected output for one example.
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    # Linear hypothesis: parameter_vector[0] is the bias term.
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    # index == -1 corresponds to the bias term (no feature multiplier).
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    global parameter_vector
    # Tolerance values for the stopping criterion.
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print("Number of iterations:", j)


def test_gradient_descent():
    for i in range(len(test_data)):
        print("Actual output value:", output(i, "test"))
        print("Hypothesis output:", calculate_hypothesis_value(i, "test"))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
336
1
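The per-parameter loop in the gradient-descent script above can be collapsed into matrix operations; below is a vectorized sketch under the same data and learning rate. The variable names here are illustrative, not from the script.

import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
X = np.hstack([np.ones((len(X), 1)), X])  # bias column plays the role of parameter_vector[0]

theta = np.array([2, 4, 1, 5], dtype=float)
lr = 0.009
for _ in range(200_000):  # hard cap so the sketch always terminates
    grad = X.T @ (X @ theta - y) / len(y)  # same summation / m as above, all params at once
    new_theta = theta - lr * grad
    if np.allclose(theta, new_theta, atol=2e-6, rtol=0):
        break
    theta = new_theta
print(theta)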
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider the rest of the activities
    for j in range(n):
        # Select this activity if it starts at or after the finish
        # time of the previously selected activity
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
336
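The greedy above assumes the activities arrive sorted by finish time; here is a small self-contained variant that sorts first and returns the chosen indices instead of printing them. The helper name is illustrative.

def select_activities(start, finish):
    # Sort activity indices by finish time, then apply the same greedy rule.
    order = sorted(range(len(finish)), key=lambda k: finish[k])
    selected = [order[0]]
    for j in order[1:]:
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected


assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]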
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
336
1
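For comparison, the same lookup with the standard library's binary search; interpolation search only beats it when the keys are roughly uniformly distributed.

import bisect

collection = [10, 30, 40, 45, 50, 66, 77, 93]
i = bisect.bisect_left(collection, 67)
print(i < len(collection) and collection[i] == 67)  # False: 67 is absent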
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        # Local and remote handles to the same tool; the tests below rely on both.
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
336
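A minimal sketch of driving the same tool outside the test harness, assuming the load_tool API the tests above exercise; the first call downloads the underlying model.

from transformers import load_tool

classifier = load_tool("text-classification")
classifier.setup()
print(classifier("That's quite cool", ["positive", "negative"]))  # expected: "positive"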
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any: UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else '''''' UpperCAmelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''), (f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''), (f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''), (f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any: for i in range(config.num_hidden_layers ): UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else '''''' # queries, keys and values UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) UpperCAmelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase : str = q_bias UpperCAmelCase : List[str] = in_proj_weight[ 
config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase : int = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) UpperCAmelCase : str = gamma_a UpperCAmelCase : Dict = gamma_a def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]: UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase ) UpperCAmelCase : str = val def a__ ( ) -> Optional[int]: UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]: UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCAmelCase : List[Any] = 1_024 UpperCAmelCase : Optional[Any] = 4_096 UpperCAmelCase : Any = 24 UpperCAmelCase : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: UpperCAmelCase : Optional[Any] = 16 UpperCAmelCase : List[Any] = '''huggingface/label-files''' UpperCAmelCase : Any = '''rvlcdip-id2label.json''' UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : Union[str, Any] = idalabel UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model'''] UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase ) for src, dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase ) # load HuggingFace model UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase ) model.eval() model.load_state_dict(UpperCAmelCase ) # Check outputs on an image UpperCAmelCase : Dict = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase ) UpperCAmelCase : List[str] = prepare_img() UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' ) UpperCAmelCase : str = encoding['''pixel_values'''] UpperCAmelCase : Any = model(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = outputs.logits # verify logits UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192] assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected" Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if 
push_to_hub: if has_lm_head: UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , ) if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
336
1
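Stripped of model specifics, the checkpoint conversion above is a key-renaming pass over a state dict; a minimal sketch of that pattern follows. The key names here are illustrative.

import torch


def rename_state_dict(state_dict, rename_pairs):
    # Pop each tensor under its old key and reinsert it under the new key.
    for old, new in rename_pairs:
        state_dict[new] = state_dict.pop(old)
    return state_dict


sd = {"blocks.0.norm1.weight": torch.zeros(3)}
sd = rename_state_dict(sd, [("blocks.0.norm1.weight", "encoder.layer.0.layernorm_before.weight")])
print(list(sd))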
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[int] = logging.get_logger(__name__) _lowerCamelCase : str = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """ibert""" def __init__( self : Optional[Any], __A : Any=3_0_5_2_2, __A : Union[str, Any]=7_6_8, __A : Optional[int]=1_2, __A : Union[str, Any]=1_2, __A : str=3_0_7_2, __A : Tuple="gelu", __A : int=0.1, __A : Optional[int]=0.1, __A : Optional[int]=5_1_2, __A : str=2, __A : Tuple=0.0_2, __A : Tuple=1E-12, __A : int=1, __A : str=0, __A : Any=2, __A : Any="absolute", __A : Optional[int]=False, __A : Dict="none", **__A : Any, ): super().__init__(pad_token_id=__A, bos_token_id=__A, eos_token_id=__A, **__A ) UpperCAmelCase : Tuple = vocab_size UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : int = hidden_act UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : Optional[int] = max_position_embeddings UpperCAmelCase : Tuple = type_vocab_size UpperCAmelCase : int = initializer_range UpperCAmelCase : Union[str, Any] = layer_norm_eps UpperCAmelCase : Dict = position_embedding_type UpperCAmelCase : List[Any] = quant_mode UpperCAmelCase : Tuple = force_dequant class __UpperCAmelCase ( lowerCamelCase__ ): @property def __magic_name__ ( self : List[str] ): if self.task == "multiple-choice": UpperCAmelCase : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase : str = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
336
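A short sketch of instantiating the config above with quantization switched on; quant_mode and force_dequant are constructor parameters shown in the class, everything else stays at its default.

from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="none")
print(config.quant_mode, config.layer_norm_eps)  # True 1e-12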
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __UpperCAmelCase ( unittest.TestCase ): def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ): UpperCAmelCase : Any = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : Any = seq_length UpperCAmelCase : Tuple = is_training UpperCAmelCase : str = use_attention_mask UpperCAmelCase : List[str] = use_token_type_ids UpperCAmelCase : int = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : str = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Optional[Any] = initializer_range UpperCAmelCase : Any = num_choices def __magic_name__ ( self : str ): UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : List[Any] = None if self.use_attention_mask: UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Any = None if self.use_token_type_ids: UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) UpperCAmelCase : Union[str, Any] = RobertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def __magic_name__ ( self : int ): UpperCAmelCase : Any = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs UpperCAmelCase : Any = True UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 
self.seq_length, self.hidden_size] ) UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Dict = FlaxRobertaModelTester(self ) @slow def __magic_name__ ( self : Any ): for model_class_name in self.all_model_classes: UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A ) UpperCAmelCase : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
336
1
import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _lowerCamelCase : Dict = True from torch.cuda.amp import autocast _lowerCamelCase : Optional[int] = logging.getLogger(__name__) def a__ ( UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None ) -> Union[str, Any]: return field(default_factory=lambda: default , metadata=UpperCAmelCase ) @dataclass class __UpperCAmelCase : UpperCamelCase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) UpperCamelCase = field( default=lowerCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCamelCase = field( default=lowerCamelCase__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) UpperCamelCase = field( default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} ) UpperCamelCase = field( default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} ) UpperCamelCase = field( default=0.1 , metadata={ """help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.""" } , ) UpperCamelCase = field( default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , ) UpperCamelCase = field( default=0.0_5 , metadata={ """help""": ( """Propability of each feature vector along the time axis to be chosen as the start of the vector""" """span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature""" """vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.""" ) } , ) UpperCamelCase = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} ) @dataclass class __UpperCAmelCase : UpperCamelCase = field( default=lowerCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCamelCase = field( default="""train+validation""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). 
Defaults to 'train'""" } , ) UpperCamelCase = field( default=lowerCamelCase__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) UpperCamelCase = field( default=lowerCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) UpperCamelCase = field( default=lowerCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase = field( default=lowerCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of validation examples to this """ """value if set.""" ) } , ) UpperCamelCase = list_field( default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , ) @dataclass class __UpperCAmelCase : UpperCamelCase = 42 UpperCamelCase = True UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None def __call__( self : List[str], __A : List[Dict[str, Union[List[int], torch.Tensor]]] ): # split inputs and labels since they have to be of different lenghts and need # different padding methods UpperCAmelCase : Optional[Any] = [{'''input_values''': feature['''input_values''']} for feature in features] UpperCAmelCase : Optional[Any] = [{'''input_ids''': feature['''labels''']} for feature in features] UpperCAmelCase : Tuple = self.processor.pad( __A, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''', ) UpperCAmelCase : Any = self.processor.pad( labels=__A, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors='''pt''', ) # replace padding with -100 to ignore loss correctly UpperCAmelCase : Any = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ), -1_0_0 ) UpperCAmelCase : Dict = labels return batch class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : List[Any], __A : nn.Module, __A : Dict[str, Union[torch.Tensor, Any]] ): model.train() UpperCAmelCase : str = self._prepare_inputs(__A ) if self.use_amp: with autocast(): UpperCAmelCase : Dict = self.compute_loss(__A, __A ) else: UpperCAmelCase : Any = self.compute_loss(__A, __A ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": UpperCAmelCase : Optional[Any] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": UpperCAmelCase : Any = loss.sum() / (inputs['''labels'''] >= 0).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: UpperCAmelCase : Union[str, Any] = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(__A ).backward() elif self.use_apex: with amp.scale_loss(__A, self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(__A ) else: loss.backward() return loss.detach() def a__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
UpperCAmelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. UpperCAmelCase : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase : str = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , UpperCAmelCase ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: UpperCAmelCase : Tuple = datasets.load_dataset( '''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name ) UpperCAmelCase : Any = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' ) # Create and save tokenizer UpperCAmelCase : List[str] = f'''[{"".join(data_args.chars_to_ignore )}]''' def remove_special_characters(UpperCAmelCase : int ): UpperCAmelCase : Optional[Any] = re.sub(UpperCAmelCase , '''''' , batch['''sentence'''] ).lower() + ''' ''' return batch UpperCAmelCase : List[str] = train_dataset.map(UpperCAmelCase , remove_columns=['''sentence'''] ) UpperCAmelCase : Any = eval_dataset.map(UpperCAmelCase , remove_columns=['''sentence'''] ) def extract_all_chars(UpperCAmelCase : Optional[int] ): UpperCAmelCase : Any = ''' '''.join(batch['''text'''] ) UpperCAmelCase : List[str] = list(set(UpperCAmelCase ) ) return {"vocab": [vocab], "all_text": [all_text]} UpperCAmelCase : Any = train_dataset.map( UpperCAmelCase , batched=UpperCAmelCase , batch_size=-1 , keep_in_memory=UpperCAmelCase , remove_columns=train_dataset.column_names , ) UpperCAmelCase : Union[str, Any] = train_dataset.map( UpperCAmelCase , batched=UpperCAmelCase , batch_size=-1 , keep_in_memory=UpperCAmelCase , remove_columns=eval_dataset.column_names , ) UpperCAmelCase : Dict = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) ) UpperCAmelCase : Tuple = {v: k for k, v in enumerate(UpperCAmelCase )} UpperCAmelCase : Union[str, Any] = vocab_dict[''' '''] del vocab_dict[" "] UpperCAmelCase : Optional[Any] = len(UpperCAmelCase ) UpperCAmelCase : Any = len(UpperCAmelCase ) with open('''vocab.json''' , '''w''' ) as vocab_file: json.dump(UpperCAmelCase , UpperCAmelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCAmelCase : str = WavaVecaCTCTokenizer( '''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , ) UpperCAmelCase : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase ) UpperCAmelCase : Any = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase ) UpperCAmelCase : List[Any] = WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , ) if data_args.max_train_samples is not None: UpperCAmelCase : Tuple = min(len(UpperCAmelCase ) , data_args.max_train_samples ) UpperCAmelCase : Tuple = train_dataset.select(range(UpperCAmelCase ) ) if data_args.max_val_samples is not None: UpperCAmelCase : Tuple = eval_dataset.select(range(data_args.max_val_samples ) ) UpperCAmelCase : Dict = torchaudio.transforms.Resample(48_000 , 16_000 ) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. 
def speech_file_to_array_fn(UpperCAmelCase : Optional[int] ): UpperCAmelCase , UpperCAmelCase : Tuple = torchaudio.load(batch['''path'''] ) UpperCAmelCase : Optional[Any] = resampler(UpperCAmelCase ).squeeze().numpy() UpperCAmelCase : List[Any] = 16_000 UpperCAmelCase : Optional[Any] = batch['''text'''] return batch UpperCAmelCase : List[Any] = train_dataset.map( UpperCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) UpperCAmelCase : Tuple = eval_dataset.map( UpperCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(UpperCAmelCase : Any ): # check that all files have the correct sampling rate assert ( len(set(batch['''sampling_rate'''] ) ) == 1 ), f'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.''' UpperCAmelCase : Union[str, Any] = processor( audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] ) batch.update(UpperCAmelCase ) return batch UpperCAmelCase : Tuple = train_dataset.map( UpperCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , ) UpperCAmelCase : str = eval_dataset.map( UpperCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , ) # Metric UpperCAmelCase : Union[str, Any] = datasets.load_metric('''wer''' ) def compute_metrics(UpperCAmelCase : List[str] ): UpperCAmelCase : Tuple = pred.predictions UpperCAmelCase : Any = np.argmax(UpperCAmelCase , axis=-1 ) UpperCAmelCase : int = processor.tokenizer.pad_token_id UpperCAmelCase : Any = processor.batch_decode(UpperCAmelCase ) # we do not want to group tokens when computing the metrics UpperCAmelCase : Dict = processor.batch_decode(pred.label_ids , group_tokens=UpperCAmelCase ) UpperCAmelCase : Tuple = wer_metric.compute(predictions=UpperCAmelCase , references=UpperCAmelCase ) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator UpperCAmelCase : int = DataCollatorCTCWithPadding(processor=UpperCAmelCase , padding=UpperCAmelCase ) # Initialize our Trainer UpperCAmelCase : Tuple = CTCTrainer( model=UpperCAmelCase , data_collator=UpperCAmelCase , args=UpperCAmelCase , compute_metrics=UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # Training if training_args.do_train: if last_checkpoint is not None: UpperCAmelCase : Any = last_checkpoint elif os.path.isdir(model_args.model_name_or_path ): UpperCAmelCase : Dict = model_args.model_name_or_path else: UpperCAmelCase : Tuple = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank ): processor.save_pretrained(training_args.output_dir ) UpperCAmelCase : Tuple = trainer.train(resume_from_checkpoint=UpperCAmelCase ) trainer.save_model() UpperCAmelCase : Tuple = train_result.metrics UpperCAmelCase : int = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase ) ) UpperCAmelCase : Dict = min(UpperCAmelCase , len(UpperCAmelCase ) ) trainer.log_metrics('''train''' , UpperCAmelCase ) trainer.save_metrics('''train''' , UpperCAmelCase ) 
trainer.save_state() # Evaluation UpperCAmelCase : List[Any] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase : Any = trainer.evaluate() UpperCAmelCase : Dict = data_args.max_val_samples if data_args.max_val_samples is not None else len(UpperCAmelCase ) UpperCAmelCase : List[Any] = min(UpperCAmelCase , len(UpperCAmelCase ) ) trainer.log_metrics('''eval''' , UpperCAmelCase ) trainer.save_metrics('''eval''' , UpperCAmelCase ) return results if __name__ == "__main__": main()
336
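The vocabulary construction buried in the training script above reduces to a character-set union over the transcripts; a condensed sketch with made-up sentences.

import json

sentences = ["hello world", "common voice"]
vocab_dict = {ch: i for i, ch in enumerate(sorted(set("".join(sentences))))}
vocab_dict["|"] = vocab_dict.pop(" ")  # word delimiter token replaces the space
vocab_dict["[UNK]"] = len(vocab_dict)
vocab_dict["[PAD]"] = len(vocab_dict)
with open("vocab.json", "w") as f:
    json.dump(vocab_dict, f)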
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Dict = {"vocab_file": "vocab.txt"} _lowerCamelCase : List[str] = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } _lowerCamelCase : List[Any] = { "facebook/esm2_t6_8M_UR50D": 1_0_2_4, "facebook/esm2_t12_35M_UR50D": 1_0_2_4, } def a__ ( UpperCAmelCase : List[str] ) -> Any: with open(UpperCAmelCase , '''r''' ) as f: UpperCAmelCase : Dict = f.read().splitlines() return [l.strip() for l in lines] class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ): super().__init__(**__A ) UpperCAmelCase : Tuple = load_vocab_file(__A ) UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) ) UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )} UpperCAmelCase : Union[str, Any] = unk_token UpperCAmelCase : Optional[Any] = cls_token UpperCAmelCase : Optional[int] = pad_token UpperCAmelCase : Optional[int] = mask_token UpperCAmelCase : List[str] = eos_token UpperCAmelCase : Optional[Any] = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __magic_name__ ( self : Tuple, __A : int ): return self._id_to_token.get(__A, self.unk_token ) def __magic_name__ ( self : List[Any], __A : str ): return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) ) def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ): return text.split() def __magic_name__ ( self : Optional[int], __A : Dict=False ): return len(self._id_to_token ) def __magic_name__ ( self : int ): return {token: i for i, token in enumerate(self.all_tokens )} def __magic_name__ ( self : Tuple, __A : str ): return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) ) def __magic_name__ ( self : Any, __A : int ): return self._id_to_token.get(__A, self.unk_token ) def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ): UpperCAmelCase : Optional[int] = [self.cls_token_id] UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1] if token_ids_a is 
not None: mask += [0] * len(__A ) + [1] return mask def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ): UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(__A, '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __magic_name__ ( self : Dict ): return self.get_vocab_size(with_added_tokens=__A ) def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ): return super()._add_tokens(__A, special_tokens=__A )
336
1
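A minimal sketch of the tokenizer in use, with a checkpoint name taken from the pretrained map above; the short protein sequence is illustrative, and the residue-per-token behavior is an assumption about the vocabulary.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
enc = tokenizer("MKTAYIAKQR")  # residues become single tokens between <cls> and <eos>
print(enc["input_ids"])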
# flake8: noqa
# Lint as: python3
_lowerCamelCase : Optional[int] = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
336
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) ) self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) ) class __UpperCAmelCase : def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ): UpperCAmelCase : Optional[int] = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : List[str] = num_channels UpperCAmelCase : str = image_size UpperCAmelCase : Optional[int] = depth_multiplier UpperCAmelCase : Union[str, Any] = depth_divisible_by UpperCAmelCase : Optional[Any] = min_depth UpperCAmelCase : List[str] = expand_ratio UpperCAmelCase : Dict = tf_padding UpperCAmelCase : str = output_stride UpperCAmelCase : Union[str, Any] = first_layer_is_expansion UpperCAmelCase : List[Any] = finegrained_output UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) UpperCAmelCase : Optional[Any] = classifier_dropout_prob UpperCAmelCase : Dict = use_labels UpperCAmelCase : List[str] = is_training UpperCAmelCase : Tuple = num_labels UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : Any = scope def __magic_name__ ( self : List[Any] ): UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Dict = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels ) UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__ ( self : Any ): return MobileNetVaConfig( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def __magic_name__ ( self : List[Any], __A : Dict, __A : 
Optional[Any], __A : Optional[int], __A : Union[str, Any] ): UpperCAmelCase : Any = MobileNetVaModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Optional[Any] = model(__A ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) self.parent.assertEqual( result.pooler_output.shape, (self.batch_size, self.last_hidden_size), ) def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ): UpperCAmelCase : Optional[int] = self.num_labels UpperCAmelCase : Any = MobileNetVaForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Optional[int] = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ): UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCAmelCase : Optional[Any] = model(__A, labels=__A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) UpperCamelCase = ( { """feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification, """image-segmentation""": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = MobileNetVaModelTester(self ) UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A ) def __magic_name__ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def __magic_name__ ( self : Tuple ): pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def __magic_name__ ( self : Any ): pass def __magic_name__ ( self : Optional[int] ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(__A ) UpperCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def 
__magic_name__ ( self : List[Any] ): UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : int ): def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ): UpperCAmelCase : Union[str, Any] = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Optional[Any] = outputs.hidden_states UpperCAmelCase : List[Any] = 1_6 self.assertEqual(len(__A ), __A ) UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Tuple = True check_hidden_states_output(__A, __A, __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Tuple = True check_hidden_states_output(__A, __A, __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : int ): UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__A ) @slow def __magic_name__ ( self : Dict ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> int: UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[Any] ): return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A ) UpperCAmelCase : Optional[int] = self.default_image_processor UpperCAmelCase : Optional[Any] = prepare_img() UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : str = model(**__A ) # verify the logits UpperCAmelCase : int = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) UpperCAmelCase : List[Any] = model.to(__A ) UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) UpperCAmelCase : List[Any] = prepare_img() UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Union[str, Any] = model(**__A ) UpperCAmelCase : Optional[Any] = outputs.logits # verify the logits UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) ) self.assertEqual(logits.shape, __A ) UpperCAmelCase : Tuple = torch.tensor( [ [[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]], [[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, 
-2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]], [[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]], ], device=__A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
336
1
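The integration tests above correspond to this basic inference flow; a compact sketch using the checkpoint name from the test, with a local image path as a stand-in input.

import torch
from PIL import Image
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])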
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase : def __init__( self : Any, __A : Optional[Any], __A : List[Any]=1_2, __A : Optional[int]=7, __A : Optional[Any]=True, __A : int=True, __A : List[str]=True, __A : Union[str, Any]=9_9, __A : Tuple=3_2, __A : str=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : int=3_7, __A : str=0.1, __A : Tuple=0.1, __A : str=5_1_2, __A : Any=0.0_2, __A : Any=0, __A : Optional[Any]=None, ): UpperCAmelCase : Dict = parent UpperCAmelCase : Optional[Any] = batch_size UpperCAmelCase : Dict = seq_length UpperCAmelCase : Optional[int] = is_training UpperCAmelCase : Tuple = use_input_mask UpperCAmelCase : Tuple = use_labels UpperCAmelCase : Optional[Any] = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : Tuple = projection_dim UpperCAmelCase : List[Any] = num_hidden_layers UpperCAmelCase : Any = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : List[str] = dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Dict = max_position_embeddings UpperCAmelCase : Optional[int] = initializer_range UpperCAmelCase : Union[str, Any] = scope UpperCAmelCase : List[Any] = bos_token_id def __magic_name__ ( self : Tuple ): UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : List[str] = input_mask.numpy() UpperCAmelCase , UpperCAmelCase : Dict = input_mask.shape UpperCAmelCase : str = np.random.randint(1, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): UpperCAmelCase : List[str] = 1 UpperCAmelCase : Union[str, Any] = 0 UpperCAmelCase : List[Any] = self.get_config() return config, input_ids, tf.convert_to_tensor(__A ) def __magic_name__ ( self : List[str] ): return BlipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) def __magic_name__ ( self : Optional[Any], __A : str, __A : Dict, __A : Any ): UpperCAmelCase : Union[str, Any] = TFBlipTextModel(config=__A ) UpperCAmelCase : Dict = model(__A, attention_mask=__A, training=__A ) UpperCAmelCase : Tuple = model(__A, training=__A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs UpperCAmelCase : Union[str, Any] = 
{'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFBlipTextModel,) if is_tf_available() else () UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Optional[Any] = BlipTextModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self, config_class=__A, hidden_size=3_7 ) def __magic_name__ ( self : str ): self.config_tester.run_common_tests() def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : Tuple ): pass def __magic_name__ ( self : Tuple ): pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def __magic_name__ ( self : int ): pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def __magic_name__ ( self : Dict ): pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def __magic_name__ ( self : Tuple ): pass @slow def __magic_name__ ( self : Optional[int] ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Dict = TFBlipTextModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __magic_name__ ( self : int, __A : int=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=__A )
336
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """codegen""" UpperCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ): UpperCAmelCase : int = vocab_size UpperCAmelCase : Tuple = n_ctx UpperCAmelCase : Tuple = n_positions UpperCAmelCase : Optional[int] = n_embd UpperCAmelCase : Union[str, Any] = n_layer UpperCAmelCase : List[str] = n_head UpperCAmelCase : Tuple = n_inner UpperCAmelCase : int = rotary_dim UpperCAmelCase : List[Any] = activation_function UpperCAmelCase : List[str] = resid_pdrop UpperCAmelCase : Optional[Any] = embd_pdrop UpperCAmelCase : str = attn_pdrop UpperCAmelCase : Tuple = layer_norm_epsilon UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Union[str, Any] = use_cache UpperCAmelCase : Any = bos_token_id UpperCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ): super().__init__(__A, task=__A, patching_specs=__A, use_past=__A ) if not getattr(self._config, '''pad_token_id''', __A ): # TODO: how to do that better? 
UpperCAmelCase : Union[str, Any] = 0 @property def __magic_name__ ( self : str ): UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__A, direction='''inputs''' ) UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __magic_name__ ( self : Dict ): return self._config.n_layer @property def __magic_name__ ( self : List[str] ): return self._config.n_head def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs( __A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A ) # We need to order the input in the way they appears in the forward() UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase : str = seqlen + 2 UpperCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase : Optional[int] = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask'''] if self.use_past: UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype UpperCAmelCase : Dict = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 ) return ordered_inputs @property def __magic_name__ ( self : Tuple ): return 1_3
336
1
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __UpperCAmelCase ( unittest.TestCase ): def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = 0 def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__A, __A ) def __magic_name__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : str = Path(__A ) / '''preprocessor_config.json''' UpperCAmelCase : List[Any] = Path(__A ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), ) json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) ) UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained(__A ) self.assertIsInstance(__A, __A ) def __magic_name__ ( self : Any ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = Path(__A ) / '''preprocessor_config.json''' UpperCAmelCase : str = Path(__A ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), ) json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(__A ) self.assertIsInstance(__A, __A ) def __magic_name__ ( self : Any ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = CLIPConfig() # Create a dummy config file with image_proceesor_type UpperCAmelCase : List[str] = Path(__A ) / '''preprocessor_config.json''' UpperCAmelCase : Optional[Any] = Path(__A ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), ) json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(__A ).to_dict() config_dict.pop('''image_processor_type''' ) UpperCAmelCase : str = CLIPImageProcessor(**__A ) # save in new folder model_config.save_pretrained(__A ) config.save_pretrained(__A ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(__A ) # make sure private variable is not incorrectly saved UpperCAmelCase : int = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(__A, __A ) def __magic_name__ ( self : Optional[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Tuple = Path(__A ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), ) UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained(__A ) self.assertIsInstance(__A, __A ) def __magic_name__ ( self : Dict ): with self.assertRaisesRegex( __A, 
'''clip-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase : str = AutoImageProcessor.from_pretrained('''clip-base''' ) def __magic_name__ ( self : List[str] ): with self.assertRaisesRegex( __A, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(__A, revision='''aaaaaa''' ) def __magic_name__ ( self : Union[str, Any] ): with self.assertRaisesRegex( __A, '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''', ): UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def __magic_name__ ( self : Union[str, Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__A ): UpperCAmelCase : Any = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A ) UpperCAmelCase : str = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A ) self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__A ) UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(__A, trust_remote_code=__A ) self.assertEqual(reloaded_image_processor.__class__.__name__, '''NewImageProcessor''' ) def __magic_name__ ( self : Any ): try: AutoConfig.register('''custom''', __A ) AutoImageProcessor.register(__A, __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoImageProcessor.register(__A, __A ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Dict = Path(__A ) / '''preprocessor_config.json''' UpperCAmelCase : Any = Path(__A ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), ) json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) ) UpperCAmelCase : Any = CustomImageProcessor.from_pretrained(__A ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__A ) UpperCAmelCase : int = AutoImageProcessor.from_pretrained(__A ) self.assertIsInstance(__A, __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def __magic_name__ ( self : List[Any] ): class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = True try: AutoConfig.register('''custom''', __A ) AutoImageProcessor.register(__A, __A ) # If remote code is not set, the default is to use local UpperCAmelCase : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A ) self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase : Any = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A ) self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' ) self.assertTrue(not hasattr(__A, '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
336
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
336
1
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # prepend the BWT column to every partial rotation, then re-sort
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(f"Burrows Wheeler transform for string '{s}' results in '{result['bwt_string']}'")
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
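A quick round-trip check of the transform above, using the classic "^BANANA" example (the sentinel character is part of the input, not something the functions add):

result = bwt_transform("^BANANA")
assert result["bwt_string"] == "BNN^AAA"
assert result["idx_original_string"] == 6
assert reverse_bwt("BNN^AAA", 6) == "^BANANA"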
336
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class __UpperCAmelCase : # setable values UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None # sigma(t_i) @classmethod def __magic_name__ ( cls : Any ): return cls() @dataclass class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): @property def __magic_name__ ( self : Optional[int] ): return True @register_to_config def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ): pass def __magic_name__ ( self : Optional[Any] ): return KarrasVeSchedulerState.create() def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ): UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy() UpperCAmelCase : Union[str, Any] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, ) def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ): if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 ) else: UpperCAmelCase : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 ) UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape ) UpperCAmelCase : Tuple = sigma + gamma * sigma UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ): UpperCAmelCase : int = sample_hat + sigma_hat * model_output UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A ) def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ): UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A ) def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ): raise NotImplementedError()
336
1
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowercase_ ( lowercase ): '''simple docstring''' def __lowerCAmelCase ( self : Dict ) ->List[Any]: """simple docstring""" a = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCAmelCase , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__UpperCAmelCase , '''neck_hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__UpperCAmelCase , '''num_attention_heads''' ) ) class lowercase_ : '''simple docstring''' def __init__( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[Any]=32 , __UpperCAmelCase : int=2 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Dict=640 , __UpperCAmelCase : str=4 , __UpperCAmelCase : int="silu" , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : Union[str, Any]=32 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : List[Any]=0.02 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Any=10 , __UpperCAmelCase : List[str]=None , ) ->Any: """simple docstring""" a = parent a = batch_size a = image_size a = patch_size a = num_channels a = last_hidden_size a = num_attention_heads a = hidden_act a = conv_kernel_size a = output_stride a = hidden_dropout_prob a = attention_probs_dropout_prob a = classifier_dropout_prob a = use_labels a = is_training a = num_labels a = initializer_range a = scope def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.num_labels ) a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels, pixel_labels def __lowerCAmelCase ( self : Tuple ) ->int: """simple docstring""" return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ) ->Any: """simple docstring""" a = MobileViTModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() a = model(__UpperCAmelCase ) self.parent.assertEqual( 
result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __lowerCAmelCase ( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ) ->Tuple: """simple docstring""" a = self.num_labels a = MobileViTForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() a = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ) ->Optional[int]: """simple docstring""" a = self.num_labels a = MobileViTForSemanticSegmentation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() a = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) a = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowercase_ ( lowercase , lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) __snake_case = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) __snake_case = False __snake_case = False __snake_case = False __snake_case = False def __lowerCAmelCase ( self : List[str] ) ->str: """simple docstring""" a = MobileViTModelTester(self ) a = MobileViTConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def __lowerCAmelCase ( self : List[Any] ) ->int: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViT does not use inputs_embeds''' ) def __lowerCAmelCase ( self : int ) ->Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='''MobileViT does not support input and output embeddings''' ) def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='''MobileViT does not output attentions''' ) def __lowerCAmelCase ( self : Dict ) ->List[str]: """simple docstring""" pass def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__UpperCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCAmelCase ( self : str ) ->Optional[int]: """simple docstring""" pass def __lowerCAmelCase ( self : List[str] ) ->Dict: 
"""simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]: """simple docstring""" def check_hidden_states_output(__UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ): a = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) a = outputs.hidden_states a = 5 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. a = 2 for i in range(len(__UpperCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def __lowerCAmelCase ( self : Optional[Any] ) ->str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) @slow def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]: """simple docstring""" for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = MobileViTModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def _a ( ) -> Union[str, Any]: a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def __lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None @slow def __lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" a = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(__UpperCAmelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): a = model(**__UpperCAmelCase ) # verify the logits a = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) a = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" a = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) a = model.to(__UpperCAmelCase ) a = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) a = prepare_img() a = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' 
).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): a = model(**__UpperCAmelCase ) a = outputs.logits # verify the logits a = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __UpperCAmelCase ) a = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] , device=__UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow def __lowerCAmelCase ( self : Union[str, Any] ) ->Tuple: """simple docstring""" a = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) a = model.to(__UpperCAmelCase ) a = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) a = prepare_img() a = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): a = model(**__UpperCAmelCase ) a = outputs.logits.detach().cpu() a = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(50, 60)] ) a = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase ) a = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase ) a = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
0
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    # defined under the Windows check so the module also imports cleanly on POSIX
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    # hide the terminal cursor for the duration of the block, restoring it on exit
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
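A minimal usage sketch of the `hide` context manager above (assumes an interactive terminal):

with hide():
    input("The cursor is hidden while this prompt waits: ")
# the cursor is restored here, even if the block raised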
336
0
"""Decorator that flags a function as experimental."""
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
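A short usage sketch for the `experimental` decorator above (the function name is an arbitrary example):

@experimental
def new_feature():
    return 42

new_feature()  # emits a UserWarning naming 'new_feature' on every call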
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
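The `_import_structure` dict above lets `import transformers.models.encodec` stay cheap: heavy submodules load only on first attribute access. A minimal sketch of the same idea using PEP 562 module `__getattr__` (illustrative only; this is not the transformers `_LazyModule` implementation, and the name-to-submodule mapping is an assumed example):

import importlib

_LAZY = {"EncodecConfig": ".configuration_encodec"}  # attribute name -> submodule

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(name)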
336
0
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Any = logging.get_logger(__name__) lowerCamelCase : Dict = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = """time_series_transformer""" lowerCAmelCase__ : Optional[int] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__(self : Any , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "student_t" , UpperCamelCase : str = "nll" , UpperCamelCase : int = 1 , UpperCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase : Optional[Union[str, bool]] = "mean" , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : int = 32 , UpperCamelCase : int = 32 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : bool = True , UpperCamelCase : str = "gelu" , UpperCamelCase : int = 64 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : int = 100 , UpperCamelCase : float = 0.02 , UpperCamelCase : Tuple=True , **UpperCamelCase : Optional[Any] , ): '''simple docstring''' lowercase__ = prediction_length lowercase__ = context_length or prediction_length lowercase__ = distribution_output lowercase__ = loss lowercase__ = input_size lowercase__ = num_time_features lowercase__ = lags_sequence lowercase__ = scaling lowercase__ = num_dynamic_real_features lowercase__ = num_static_real_features lowercase__ = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCamelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) lowercase__ = cardinality else: lowercase__ = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCamelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) lowercase__ = embedding_dimension else: lowercase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowercase__ = num_parallel_samples # Transformer architecture configuration lowercase__ = input_size * len(UpperCamelCase ) + self._number_of_features lowercase__ = d_model lowercase__ = encoder_attention_heads lowercase__ = decoder_attention_heads lowercase__ = encoder_ffn_dim lowercase__ = decoder_ffn_dim lowercase__ = encoder_layers lowercase__ = decoder_layers lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = activation_function lowercase__ = init_std lowercase__ = use_cache super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def 
_number_of_features(self) -> int:
        # name restored from the `self._number_of_features` reference in __init__ above
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
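Assuming this is transformers' TimeSeriesTransformerConfig, a minimal instantiation sketch (both values are arbitrary; the printed defaults are what the signature above implies):

from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
print(config.d_model, config.lags_sequence)  # expected defaults: 64 and [1, 2, 3, 4, 5, 6, 7]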
2
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")

    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # the last partition absorbs any remainder
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
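A quick usage sketch of the allocation helper above:

print(allocation_num(100, 4))
# ['1-25', '26-50', '51-75', '76-100']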
336
0
"""Checks and summary statistics for arithmetic series."""


def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")

    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
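A quick check of the two helpers above:

assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0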
3
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file _lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`." def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]: if subparsers is not None: UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description ) else: UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description ) # Core arguments UpperCAmelCase : Optional[int] = parser.add_argument_group( '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' ) config_args.add_argument( '''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , ) config_args.add_argument( '''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , ) config_args.add_argument( '''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , ) UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' ) pod_args.add_argument( '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , ) pod_args.add_argument( '''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , ) pod_args.add_argument( '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , ) pod_args.add_argument( '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , ) pod_args.add_argument( '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , ) pod_args.add_argument( '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' ) if subparsers is not None: parser.set_defaults(func=UpperCAmelCase ) return parser def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(UpperCAmelCase ): UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: UpperCAmelCase : List[Any] = defaults.command_file if not args.command and defaults.commands is not None: UpperCAmelCase : List[str] = defaults.commands if not args.tpu_name: UpperCAmelCase : Tuple = defaults.tpu_name if not args.tpu_zone: UpperCAmelCase : int = defaults.tpu_zone if args.accelerate_version == "dev": UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git''' elif args.accelerate_version == "latest": UpperCAmelCase : Dict = '''accelerate -U''' elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ): UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}''' if not args.command_file and not args.command: raise ValueError('''You must specify either a command file or a command to run on the pod.''' ) if args.command_file: with open(args.command_file , '''r''' ) as f: UpperCAmelCase : int = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , UpperCAmelCase ): UpperCAmelCase : int = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate UpperCAmelCase : Optional[int] = ['''cd /usr/share'''] if args.install_accelerate: new_cmd += [f'''pip install {args.accelerate_version}'''] new_cmd += args.command UpperCAmelCase : int = '''; '''.join(UpperCAmelCase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess UpperCAmelCase : Any = ['''gcloud'''] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f'''Running {" ".join(UpperCAmelCase )}''' ) return subprocess.run(UpperCAmelCase ) print('''Successfully setup pod.''' ) def a__ ( ) -> Any: UpperCAmelCase : Any = tpu_command_parser() UpperCAmelCase : Tuple = parser.parse_args() tpu_command_launcher(UpperCAmelCase )
336
0
"""Convert ALBERT checkpoint."""
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
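A hedged sketch of calling the conversion function above directly from Python instead of the CLI (all three paths are placeholders, not real files):

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="albert_base/model.ckpt-best",
    albert_config_file="albert_base/albert_config.json",
    pytorch_dump_path="albert_base/pytorch_model.bin",
)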
4
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[int] = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: print('''Loading config file...''' ) def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ): UpperCAmelCase : List[str] = [] for k, v in d.items(): UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k if isinstance(UpperCAmelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() ) else: items.append((new_key, v) ) return dict(UpperCAmelCase ) UpperCAmelCase : List[str] = argparse.Namespace() with open(UpperCAmelCase , '''r''' ) as yaml_file: try: UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader ) UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase ) for k, v in flat_cfg.items(): setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) ) return config def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]: UpperCAmelCase : int = MobileViTVaConfig() UpperCAmelCase : str = False # dataset if task_name.startswith('''imagenet1k_''' ): UpperCAmelCase : Any = 1_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase : Any = 384 else: UpperCAmelCase : Tuple = 256 UpperCAmelCase : int = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): UpperCAmelCase : Optional[Any] = 21_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase : str = 384 else: UpperCAmelCase : Dict = 256 UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): UpperCAmelCase : Optional[Any] = 151 UpperCAmelCase : Tuple = 512 UpperCAmelCase : Tuple = '''ade20k-id2label.json''' UpperCAmelCase : Tuple = True elif task_name.startswith('''voc_''' ): UpperCAmelCase : Dict = 21 UpperCAmelCase : str = 512 UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json''' UpperCAmelCase : Dict = True # orig_config UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase ) assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) UpperCAmelCase : Any = getattr(UpperCAmelCase , 
'''model.segmentation.deeplabv3.aspp_out_channels''' , 512 ) UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label UpperCAmelCase : Union[str, Any] = '''huggingface/label-files''' UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : int = idalabel UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} return config def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]: UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase ) UpperCAmelCase : List[str] = val def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]: if base_model: UpperCAmelCase : Dict = '''''' else: UpperCAmelCase : Dict = '''mobilevitv2.''' UpperCAmelCase : Optional[int] = [] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCAmelCase : List[str] = k[8:] else: UpperCAmelCase : Dict = k if ".block." in k: UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: UpperCAmelCase : Dict = [0, 1] elif i == 4: UpperCAmelCase : Dict = [0, 1, 2, 3] elif i == 5: UpperCAmelCase : int = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: UpperCAmelCase : Optional[Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: UpperCAmelCase : Any = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." 
in k: UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." in k: UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any: UpperCAmelCase : str = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(UpperCAmelCase ) for k in keys_to_ignore: state_dict.pop(UpperCAmelCase , UpperCAmelCase ) def a__ ( ) -> Union[str, Any]: UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase ) # load original state_dict UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval() UpperCAmelCase : str = False else: UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval() UpperCAmelCase : Any = False # remove and rename some keys of load the original model UpperCAmelCase : Optional[Any] = checkpoint remove_unused_keys(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # load modified state_dict model.load_state_dict(UpperCAmelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase ) # verify classification model if task_name.startswith('''imagenet''' ): UpperCAmelCase : Optional[Any] = outputs.logits UpperCAmelCase : int = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": _lowerCamelCase : str = 
argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
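# A minimal sketch of driving the conversion entry point above and reloading the
# result. The script file name and local paths are illustrative assumptions; the
# flag names come from the argparse block itself:
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf
#
# The dumped folder should then reload with the regular transformers API:
#
#   from transformers import MobileViTV2ForImageClassification, MobileViTImageProcessor
#   model = MobileViTV2ForImageClassification.from_pretrained("./mobilevitv2-1.0-hf")
#   image_processor = MobileViTImageProcessor.from_pretrained("./mobilevitv2-1.0-hf")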
336
0
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( lowerCAmelCase , unittest.TestCase): SCREAMING_SNAKE_CASE__ = DanceDiffusionPipeline SCREAMING_SNAKE_CASE__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } SCREAMING_SNAKE_CASE__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def __A (self ) -> Dict: torch.manual_seed(0 ) _lowercase =UNetaDModel( block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=UpperCAmelCase , use_timestep_embedding=UpperCAmelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , ) _lowercase =IPNDMScheduler() _lowercase ={ '''unet''': unet, '''scheduler''': scheduler, } return components def __A (self , UpperCAmelCase , UpperCAmelCase=0 ) -> Tuple: if str(UpperCAmelCase ).startswith('''mps''' ): _lowercase =torch.manual_seed(UpperCAmelCase ) else: _lowercase =torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) _lowercase ={ '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 4, } return inputs def __A (self ) -> Tuple: _lowercase ='''cpu''' # ensure determinism for the device-dependent torch.Generator _lowercase =self.get_dummy_components() _lowercase =DanceDiffusionPipeline(**UpperCAmelCase ) _lowercase =pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) _lowercase =self.get_dummy_inputs(UpperCAmelCase ) _lowercase =pipe(**UpperCAmelCase ) _lowercase =output.audios _lowercase =audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) _lowercase =np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def __A (self ) -> Any: return super().test_save_load_local() @skip_mps def __A (self ) -> Dict: return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def __A (self ) -> Any: return super().test_save_load_optional_components() @skip_mps def __A (self ) -> Optional[Any]: return super().test_attention_slicing_forward_pass() def __A (self ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase): def __A (self ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A (self ) -> List[str]: _lowercase =torch_device _lowercase =DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ) _lowercase =pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) _lowercase =torch.manual_seed(0 ) _lowercase =pipe(generator=UpperCAmelCase , 
num_inference_steps=1_0_0 , audio_length_in_s=4.096 ) _lowercase =output.audios _lowercase =audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _lowercase =np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def __A (self ) -> Optional[Any]: _lowercase =torch_device _lowercase =DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa ) _lowercase =pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) _lowercase =torch.manual_seed(0 ) _lowercase =pipe(generator=UpperCAmelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 ) _lowercase =output.audios _lowercase =audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _lowercase =np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
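# A short usage sketch of the pipeline exercised by the slow tests above; the
# checkpoint name and call arguments are exactly the ones the tests pass, while
# the CUDA device is an assumption (any torch device works):
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
generator = torch.manual_seed(0)
output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
audio = output.audios[0]  # numpy array of shape (channels, samples)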
5
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __UpperCAmelCase ( lowerCamelCase__ ): def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) UpperCAmelCase : str = '''__cached_''' + self.fget.__name__ UpperCAmelCase : int = getattr(__A, __A, __A ) if cached is None: UpperCAmelCase : Any = self.fget(__A ) setattr(__A, __A, __A ) return cached def a__ ( UpperCAmelCase : Optional[Any] ) -> Any: UpperCAmelCase : Any = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_torch_fx_proxy(UpperCAmelCase ): return True if is_torch_available(): import torch if isinstance(UpperCAmelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCAmelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]: return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : str ) -> Tuple: return _is_numpy(UpperCAmelCase ) def a__ ( UpperCAmelCase : str ) -> List[Any]: import torch return isinstance(UpperCAmelCase , torch.Tensor ) def a__ ( UpperCAmelCase : str ) -> List[Any]: return False if not is_torch_available() else _is_torch(UpperCAmelCase ) def a__ ( UpperCAmelCase : Tuple ) -> List[str]: import torch return isinstance(UpperCAmelCase , torch.device ) def a__ ( UpperCAmelCase : Any ) -> Any: return False if not is_torch_available() else _is_torch_device(UpperCAmelCase ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: import torch if isinstance(UpperCAmelCase , UpperCAmelCase ): if hasattr(UpperCAmelCase , UpperCAmelCase ): UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase ) else: return False return isinstance(UpperCAmelCase , torch.dtype ) def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase ) def a__ ( UpperCAmelCase : Any ) -> str: import tensorflow as tf return isinstance(UpperCAmelCase , tf.Tensor ) def a__ ( UpperCAmelCase : int ) -> Union[str, Any]: return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[str] ) -> Tuple: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(UpperCAmelCase ) return type(UpperCAmelCase ) == tf.Tensor def a__ ( UpperCAmelCase : int ) -> List[Any]: return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[Any] ) -> Dict: import jax.numpy as jnp # noqa: F811 return isinstance(UpperCAmelCase , jnp.ndarray ) 
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]: return False if not is_flax_available() else _is_jax(UpperCAmelCase ) def a__ ( UpperCAmelCase : int ) -> Tuple: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return [to_py_obj(UpperCAmelCase ) for o in obj] elif is_tf_tensor(UpperCAmelCase ): return obj.numpy().tolist() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ).tolist() elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( UpperCAmelCase : Any ) -> List[str]: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return np.array(UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): return obj.numpy() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ) else: return obj class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Optional[Any] = fields(self ) # Safety and consistency checks if not len(__A ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) UpperCAmelCase : int = getattr(self, class_fields[0].name ) UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__A ): if isinstance(__A, __A ): UpperCAmelCase : Tuple = first_field.items() UpperCAmelCase : Any = True else: try: UpperCAmelCase : Optional[Any] = iter(__A ) UpperCAmelCase : Optional[Any] = True except TypeError: UpperCAmelCase : Optional[int] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__A ): if ( not isinstance(__A, (list, tuple) ) or not len(__A ) == 2 or not isinstance(element[0], __A ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self, element[0], element[1] ) if element[1] is not None: UpperCAmelCase : Union[str, Any] = element[1] elif first_field is not None: UpperCAmelCase : Union[str, Any] = first_field else: for field in class_fields: UpperCAmelCase : Optional[Any] = getattr(self, field.name ) if v is not None: UpperCAmelCase : Optional[int] = v def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ): raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ): raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : Any, *__A : Dict, **__A : str ): raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ): raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self : List[str], __A : List[str] ): if isinstance(__A, __A ): UpperCAmelCase : int = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__A, __A ) super().__setattr__(__A, __A ) def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ): # Will raise a KeyException if needed super().__setitem__(__A, __A ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__A, __A ) def __magic_name__ ( self : List[str] ): return tuple(self[k] for k in self.keys() ) class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): @classmethod def __magic_name__ ( cls : List[Any], __A : Tuple ): raise ValueError( F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """longest""" UpperCamelCase = """max_length""" UpperCamelCase = """do_not_pad""" class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """pt""" UpperCamelCase = """tf""" UpperCamelCase = """np""" UpperCamelCase = """jax""" class __UpperCAmelCase : def __init__( self : Any, __A : List[ContextManager] ): UpperCAmelCase : Tuple = context_managers UpperCAmelCase : Tuple = ExitStack() def __enter__( self : Any ): for context_manager in self.context_managers: self.stack.enter_context(__A ) def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ): self.stack.__exit__(*__A, **__A ) def a__ ( UpperCAmelCase : Union[str, Any] ) -> str: UpperCAmelCase : int = infer_framework(UpperCAmelCase ) if framework == "tf": UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ ( UpperCAmelCase : Dict ) -> Any: UpperCAmelCase : List[Any] = model_class.__name__ UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase ) if framework == "tf": UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase : Dict = 
inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]: def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ): for k, v in d.items(): UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k if v and isinstance(UpperCAmelCase , UpperCAmelCase ): yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items() else: yield key, v return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) @contextmanager def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]: if is_numpy_array(UpperCAmelCase ): return np.transpose(UpperCAmelCase , axes=UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.T if axes is None else array.permute(*UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]: if is_numpy_array(UpperCAmelCase ): return np.reshape(UpperCAmelCase , UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.reshape(*UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.reshape(UpperCAmelCase , UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.reshape(UpperCAmelCase , UpperCAmelCase ) else: raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any: if is_numpy_array(UpperCAmelCase ): return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str: if is_numpy_array(UpperCAmelCase ): return np.expand_dims(UpperCAmelCase , UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.unsqueeze(dim=UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_numpy_array(UpperCAmelCase ): return 
np.size(UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.numel() elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.size(UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return array.size else: raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict: for key, value in auto_map.items(): if isinstance(UpperCAmelCase , (tuple, list) ): UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase : List[Any] = f'''{repo_id}--{value}''' return auto_map def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]: for base_class in inspect.getmro(UpperCAmelCase ): UpperCAmelCase : Any = base_class.__module__ UpperCAmelCase : Dict = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'''Could not infer framework from class {model_class}.''' )
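# The dispatch helpers above all follow the same pattern: probe the tensor type,
# then forward to the matching backend. A numpy-only sketch; the helper names
# (transpose, squeeze, expand_dims from transformers.utils.generic) are assumed,
# since this dump renames every function to a__:
#
#   import numpy as np
#   x = np.ones((1, 3, 4))
#   transpose(x).shape        # (4, 3, 1): axes=None reverses the axis order
#   squeeze(x, axis=0).shape  # (3, 4)
#   expand_dims(x, 0).shape   # (1, 1, 3, 4)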
336
0
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    # Count perimeters up to `limit` that are produced by exactly one
    # integer-sided right triangle, via Euclid's parametrisation.
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
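# A quick sanity check of Euclid's parametrisation used above: for coprime m > n of
# opposite parity, (m^2 - n^2, 2mn, m^2 + n^2) is a primitive Pythagorean triple
# with perimeter 2m(m + n). Illustrative check, not part of the original solution:
m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5) and a * a + b * b == c * c
assert a + b + c == 2 * m * (m + n)  # perimeter 12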
6
import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = LayoutLMTokenizer UpperCamelCase = LayoutLMTokenizerFast UpperCamelCase = True UpperCamelCase = True def __magic_name__ ( self : Any ): super().setUp() UpperCAmelCase : Dict = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __magic_name__ ( self : Union[str, Any], **__A : List[str] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A ) def __magic_name__ ( self : Optional[int], __A : int ): UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running''' UpperCAmelCase : Optional[int] = '''unwanted, running''' return input_text, output_text def __magic_name__ ( self : Any ): UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] ) def __magic_name__ ( self : Optional[int] ): pass
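# The vocabulary above exercises WordPiece splitting; a sketch of the behaviour the
# test asserts (the vocab path is hypothetical):
#
#   from transformers import LayoutLMTokenizer
#   tok = LayoutLMTokenizer("vocab.txt")
#   tok.tokenize("UNwant\u00E9d,running")
#   # -> ['un', '##want', '##ed', ',', 'runn', '##ing']  (ids [7, 4, 5, 10, 8, 9])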
336
0
def decimal_to_binary(num: int) -> str:
    """Convert an integer to a binary string prefixed with 0b (or -0b if negative)."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
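# Example outputs of the converter above (a quick sketch):
print(decimal_to_binary(0))    # 0b0
print(decimal_to_binary(40))   # 0b101000
print(decimal_to_binary(-40))  # -0b101000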
7
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Dict = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : Dict = patch_size UpperCAmelCase : int = num_channels UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : Union[str, Any] = use_labels UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Tuple = mask_ratio UpperCAmelCase : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCAmelCase : Tuple = (image_size // patch_size) ** 2 UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : str = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Optional[Any] ): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ): UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A ) UpperCAmelCase : Tuple = model(__A, training=__A ) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ): UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A ) UpperCAmelCase : int = model(__A, training=__A ) # expected sequence length = num_patches UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2 UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCAmelCase : Tuple = 1 UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A ) UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = model(__A, training=__A ) UpperCAmelCase : Union[str, Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Dict = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = TFViTMAEModelTester(self ) UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[str] = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) ) UpperCAmelCase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) ) def __magic_name__ ( self : str ): UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(__A ) UpperCAmelCase : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : int = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__A ) def __magic_name__ ( self : int ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) 
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : str = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : Dict = model(__A, noise=__A ) UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A ) UpperCAmelCase : Dict = outputs_dict[0].numpy() UpperCAmelCase : Tuple = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 ) def __magic_name__ ( self : Optional[Any] ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__A : Union[str, Any] ): UpperCAmelCase : str = {} for k, v in inputs_dict.items(): if tf.is_tensor(__A ): UpperCAmelCase : Tuple = v.numpy() else: UpperCAmelCase : str = np.array(__A ) return inputs_np_dict for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : Any = self._prepare_for_class(__A, __A ) UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A ) UpperCAmelCase : str = model(__A, noise=__A ) UpperCAmelCase : str = model(**__A, noise=__A ) self.assert_outputs_same(__A, __A ) def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ): # make masks reproducible np.random.seed(2 ) UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : int = tf.constant(__A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCAmelCase : List[Any] = tf_noise super().check_pt_tf_models(__A, __A, __A ) def __magic_name__ ( self : str ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Union[str, Any] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__A ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(__A, __A ),) if isinstance(__A, __A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__A, '''_keras_serializable''', __A ) } UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : str = tf.convert_to_tensor(__A ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: UpperCAmelCase : Tuple = main_layer_class(__A ) UpperCAmelCase : int = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) ) UpperCAmelCase : List[Any] = model(__A ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' ) model.save(__A ) UpperCAmelCase : List[str] = tf.keras.models.load_model( __A, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__A, tf.keras.Model ) UpperCAmelCase : Tuple = model(__A ) self.assert_outputs_same(__A, __A ) @slow def __magic_name__ ( self : Dict ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(__A ) UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A ) UpperCAmelCase : Union[str, Any] = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy() UpperCAmelCase : Union[str, Any] = 0 else: UpperCAmelCase : Optional[int] = outputs.logits.numpy() UpperCAmelCase : int = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A, saved_model=__A ) UpperCAmelCase : Dict = model_class.from_pretrained(__A ) UpperCAmelCase : str = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy() UpperCAmelCase : Dict = 0 else: UpperCAmelCase : Any = after_outputs['''logits'''].numpy() UpperCAmelCase : Dict = 0 UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__A, 1E-5 ) def __magic_name__ ( self : Optional[Any] ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : List[Any] = model(__A, noise=__A ) UpperCAmelCase : str = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(__A ) UpperCAmelCase : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCAmelCase : str = model_class.from_config(model.config ) UpperCAmelCase : List[str] = new_model(__A ) # Build model 
new_model.set_weights(model.get_weights() ) UpperCAmelCase : Tuple = new_model(__A, noise=__A ) self.assert_outputs_same(__A, __A ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def __magic_name__ ( self : Tuple ): pass @slow def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__A ) def a__ ( ) -> Dict: UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[str] ): return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def __magic_name__ ( self : str ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) UpperCAmelCase : List[str] = self.default_image_processor UpperCAmelCase : Any = prepare_img() UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCAmelCase : Optional[int] = ViTMAEConfig() UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCAmelCase : Optional[int] = model(**__A, noise=__A ) # verify the logits UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : List[str] = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
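# Sketch of the reproducible-mask trick the ViTMAE tests above rely on: passing an
# explicit `noise` array makes the random patch masking identical across runs and
# frameworks. Checkpoint and fixture paths are the ones the integration test uses:
import numpy as np
from PIL import Image
from transformers import TFViTMAEForPreTraining, ViTImageProcessor

model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
image_processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="tf")
num_patches = (model.config.image_size // model.config.patch_size) ** 2
noise = np.random.uniform(size=(1, num_patches))
outputs = model(**inputs, noise=noise)
print(outputs.logits.shape)  # (1, 196, 768): per-patch pixel reconstructions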
336
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = "sew" def __init__( self : Dict , _UpperCamelCase : Any=3_2 , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Optional[Any]=1_2 , _UpperCamelCase : List[Any]=1_2 , _UpperCamelCase : Optional[Any]=3_0_7_2 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : Optional[Any]=0.0 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : int=0.02 , _UpperCamelCase : Tuple=1e-5 , _UpperCamelCase : List[str]="group" , _UpperCamelCase : int="gelu" , _UpperCamelCase : Tuple=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _UpperCamelCase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _UpperCamelCase : int=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Union[str, Any]=1_2_8 , _UpperCamelCase : Any=1_6 , _UpperCamelCase : str=True , _UpperCamelCase : Optional[int]=0.05 , _UpperCamelCase : List[Any]=1_0 , _UpperCamelCase : Optional[int]=2 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Tuple=1_0 , _UpperCamelCase : Any=0 , _UpperCamelCase : Dict="mean" , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : str=False , _UpperCamelCase : List[Any]=2_5_6 , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=1 , _UpperCamelCase : List[Any]=2 , **_UpperCamelCase : int , ) ->Optional[int]: super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase ) snake_case_ = hidden_size snake_case_ = feat_extract_norm snake_case_ = feat_extract_activation snake_case_ = list(_UpperCamelCase ) snake_case_ = list(_UpperCamelCase ) snake_case_ = list(_UpperCamelCase ) snake_case_ = conv_bias snake_case_ = num_conv_pos_embeddings snake_case_ = num_conv_pos_embedding_groups snake_case_ = len(self.conv_dim ) snake_case_ = num_hidden_layers snake_case_ = intermediate_size snake_case_ = squeeze_factor snake_case_ = hidden_act snake_case_ = num_attention_heads snake_case_ = hidden_dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = feat_proj_dropout snake_case_ = final_dropout snake_case_ = layerdrop snake_case_ = layer_norm_eps snake_case_ = initializer_range snake_case_ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case_ = apply_spec_augment snake_case_ = mask_time_prob snake_case_ = mask_time_length snake_case_ = mask_time_min_masks snake_case_ 
= mask_feature_prob snake_case_ = mask_feature_length snake_case_ = mask_feature_min_masks # ctc loss snake_case_ = ctc_loss_reduction snake_case_ = ctc_zero_infinity # sequence classification snake_case_ = use_weighted_layer_sum snake_case_ = classifier_proj_size @property def snake_case__( self : Dict ) ->Optional[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
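# Minimal sketch instantiating the configuration above. The closing property
# multiplies the conv strides together; its original name, inputs_to_logits_ratio,
# is assumed here because the dump renames it:
from transformers import SEWConfig

config = SEWConfig()
print(config.num_feat_extract_layers)  # 13 feature-extractor conv layers by default
print(config.inputs_to_logits_ratio)   # 5 * 2**6 = 320 input samples per logit frame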
8
def partition(m: int) -> int:
    # memo[n][k] accumulates the number of partitions of n using parts bounded by k.
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
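# Worked example for the partition DP above: p(5) = 7, counting
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
print(partition(5))   # 7
print(partition(10))  # 42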
336
0
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
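# Example run of the checker above (31 January 2010 fell on a Sunday):
print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!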
9
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
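# Example grid for the backtracking solver above (0 = open cell, 1 = wall); the
# solver searches a path from the top-left to the bottom-right corner:
maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
assert solve_maze(maze)  # prints the 0/1 path matrix and returns True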
336
0
import numpy as np


def sigmoid(vector: np.array) -> np.array:
    """Return the logistic sigmoid of the input, 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    """Sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
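# Quick numeric check of the approximation above; x * sigmoid(1.702 * x) closely
# tracks the exact GELU (Hendrycks & Gimpel, 2016):
x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))                     # [0.26894142 0.5        0.73105858]
print(gaussian_error_linear_unit(x))  # roughly [-0.1542  0.      0.8458]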
10
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __UpperCAmelCase : def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Any = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : List[str] = patch_size UpperCAmelCase : Dict = num_channels UpperCAmelCase : List[Any] = is_training UpperCAmelCase : Dict = use_labels UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : Union[str, Any] = num_hidden_layers UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Any = hidden_act UpperCAmelCase : Any = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : str = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : int = scope UpperCAmelCase : List[str] = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size UpperCAmelCase : str = (self.image_size // 3_2) ** 2 UpperCAmelCase : List[str] = num_patches + 1 def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : str = None if self.use_labels: UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Any ): UpperCAmelCase : Dict = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 1_6, 3_2], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, 
backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, ) def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ): UpperCAmelCase : int = ViTHybridModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ): UpperCAmelCase : str = self.type_sequence_label_size UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self : int ): UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs UpperCAmelCase : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () UpperCamelCase = ( {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Any = ViTHybridModelTester(self ) UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : int ): UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) UpperCAmelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, nn.Linear ) ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = model_class(__A ) UpperCAmelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : str = [*signature.parameters.keys()] UpperCAmelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Dict = _config_zero_init(__A ) for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(config=__A ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == 
"ViTHybridPatchEmbeddings": UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @slow def __magic_name__ ( self : List[str] ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> Tuple: UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : str ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __A ) UpperCAmelCase : Tuple = self.default_image_processor UpperCAmelCase : int = prepare_img() UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(**__A ) # verify the logits UpperCAmelCase : str = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow @require_accelerate def __magic_name__ ( self : Dict ): UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' ) UpperCAmelCase : Tuple = prepare_img() UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' ) UpperCAmelCase : Dict = model(**__A ) UpperCAmelCase : Any = outputs.logits # model predicts one of the 1000 ImageNet classes UpperCAmelCase : Dict = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
336
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        # The config's id2label maps the argmax logit index to the answer string.
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
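# Example (not part of the original file): a hedged usage sketch, assuming the
# transformers agents convention that a PipelineTool instance is callable with
# its declared inputs and that the tool is exported from transformers.tools.
# The image path is hypothetical.
from PIL import Image

from transformers.tools import ImageQuestionAnsweringTool

tool = ImageQuestionAnsweringTool()
image = Image.open("photo.jpg")
print(tool(image, "How many cats are in the picture?"))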
11
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: check every 3-permutation, O(n^3).
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Sort once, then scan with two pointers, O(n^2).
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
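# Example (not part of the original file): a deterministic sanity check of the
# two implementations above on a toy input of my own; copies are passed because
# triplet_sum2 sorts its argument in place.
sample = [1, 5, 8, 2, 7]
assert triplet_sum1(sample[:], 10) == (1, 2, 7)  # O(n^3) permutation scan
assert triplet_sum2(sample[:], 10) == (1, 2, 7)  # O(n^2) sort + two pointers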
336
0
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = ['pixel_values'] def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Tuple , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_pad __lowerCamelCase = pad_size def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ): __lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ ) __lowerCamelCase = (old_height // size + 1) * size - old_height __lowerCamelCase = (old_width // size + 1) * size - old_width return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Any , ): __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_pad if do_pad is not None else self.do_pad __lowerCamelCase = pad_size if pad_size is not None else self.pad_size __lowerCamelCase = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_pad: __lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] __lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
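# Example (not part of the original file): the pad method above always rounds
# *up* to the next multiple of `size`, so an already-aligned side still gains a
# full block. A standalone sketch of that arithmetic:
def pad_amount(old: int, size: int = 8) -> int:
    # mirrors (old // size + 1) * size - old from the processor above
    return (old // size + 1) * size - old


assert pad_amount(17) == 7  # 17 -> 24
assert pad_amount(16) == 8  # 16 -> 24, not 16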
12
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    def put(self, value):
        raise NotImplementedError()

    def end(self):
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
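# Example (not part of the original file): how the iterator variant is
# typically consumed -- generate() runs in a background thread and pushes
# decoded fragments into the queue while the caller iterates. The checkpoint
# name is only an example.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

generated_text = ""
for new_text in streamer:  # blocks on the queue until the stop signal arrives
    generated_text += new_text
thread.join()
print(generated_text)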
336
0
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : Tuple = IFInpaintingSuperResolutionPipeline _UpperCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) _UpperCAmelCase : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def _SCREAMING_SNAKE_CASE ( self : str): return self._get_superresolution_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str=0): if str(lowerCAmelCase__).startswith("mps"): SCREAMING_SNAKE_CASE_: Tuple = torch.manual_seed(lowerCAmelCase__) else: SCREAMING_SNAKE_CASE_: str = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _SCREAMING_SNAKE_CASE ( self : Dict): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) def _SCREAMING_SNAKE_CASE ( self : List[Any]): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA") def _SCREAMING_SNAKE_CASE ( self : Any): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
13
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(data_input_tuple)):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    # parameter_vector[0] multiplies the implicit bias input, which is always 1.
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
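# Example (not part of the original file): the same averaged batch update,
# vectorized with NumPy for comparison. X carries an explicit bias column of
# ones so theta[0] plays the role of parameter_vector[0]; hyperparameters
# mirror the script above, so convergence behavior is the same.
import numpy as np

X = np.array(
    [[1, 5, 2, 3], [1, 6, 5, 9], [1, 11, 12, 13], [1, 1, 1, 1], [1, 11, 12, 13]], dtype=float
)
y = np.array([15, 25, 41, 8, 41], dtype=float)
theta = np.array([2.0, 4.0, 1.0, 5.0])

for _ in range(100_000):
    grad = X.T @ (X @ theta - y) / len(y)  # same averaged derivative as above
    new_theta = theta - 0.009 * grad
    if np.allclose(new_theta, theta, atol=0.000002, rtol=0):
        break
    theta = new_theta
print(theta)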
336
0
import collections import importlib.util import os import re from pathlib import Path _lowerCamelCase : str = """src/transformers""" # Matches is_xxx_available() _lowerCamelCase : Optional[int] = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} _lowerCamelCase : Tuple = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _lowerCamelCase : Union[str, Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available _lowerCamelCase : Tuple = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") _lowerCamelCase : Union[str, Any] = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _lowerCamelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", _lowerCamelCase : Optional[int] = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], _lowerCamelCase : int = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo _lowerCamelCase : str = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: _lowerCamelCase : str = re.compile(r"""^\s*try:""") # Catches a line with else: _lowerCamelCase : Any = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" if _re_test_backend.search(lowercase_ ) is None: return None A__ = [b[0] for b in _re_backend.findall(lowercase_ )] backends.sort() return "_and_".join(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" with open(lowercase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A__ = f.readlines() A__ = 0 while line_index < len(lowercase_ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowercase_ ): return None # First grab the objects without a specific backend in _import_structure A__ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: A__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowercase_ ): A__ = _re_one_line_import_struct.search(lowercase_ ).groups()[0] A__ = re.findall('''\[([^\]]+)\]''' , lowercase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue A__ = _re_import_struct_key_value.search(lowercase_ ) if single_line_import_search is not None: A__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 A__ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
A__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): A__ = lines[line_index] if _re_import_struct_add_one.search(lowercase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowercase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowercase_ ) is not None: A__ = _re_import_struct_add_many.search(lowercase_ ).groups()[0].split(''', ''' ) A__ = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif _re_between_brackets.search(lowercase_ ) is not None: A__ = _re_between_brackets.search(lowercase_ ).groups()[0].split(''', ''' ) A__ = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif _re_quote_object.search(lowercase_ ) is not None: objects.append(_re_quote_object.search(lowercase_ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 A__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend A__ = [] while ( line_index < len(lowercase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): A__ = lines[line_index] A__ = _re_import.search(lowercase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 A__ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(lowercase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
A__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): A__ = lines[line_index] A__ = _re_import.search(lowercase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 A__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" def find_duplicates(lowercase_ ): return [k for k, v in collections.Counter(lowercase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] A__ = [] for key in import_dict_objects.keys(): A__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) A__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): A__ = '''base imports''' if key == '''none''' else f"""{key} backend""" errors.append(f"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" A__ = [] for root, _, files in os.walk(lowercase_ ): if "__init__.py" in files: A__ = os.path.join(lowercase_ , '''__init__.py''' ) A__ = parse_init(lowercase_ ) if objects is not None: A__ = analyze_results(*lowercase_ ) if len(lowercase_ ) > 0: A__ = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(lowercase_ ) ) if len(lowercase_ ) > 0: raise ValueError('''\n\n'''.join(lowercase_ ) ) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = [] for path, directories, files in os.walk(lowercase_ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(lowercase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowercase_ ) / folder).glob('''*.py''' ) ) ) == 0: continue A__ = str((Path(lowercase_ ) / folder).relative_to(lowercase_ ) ) A__ = short_path.replace(os.path.sep , '''.''' ) submodules.append(lowercase_ ) for fname in files: if fname == "__init__.py": continue A__ = str((Path(lowercase_ ) / fname).relative_to(lowercase_ ) ) A__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(lowercase_ ) return submodules _lowerCamelCase : int = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" A__ = 
importlib.util.spec_from_file_location( '''transformers''' , os.path.join(lowercase_ , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) A__ = spec.loader.load_module() A__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(lowercase_ ) > 0: A__ = '''\n'''.join(f"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
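# Example (not part of the original script): a self-contained demo of the
# backend-detection step. The regexes are copied from the script above; the
# sample line is my own.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")


def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


print(find_backend("    if not is_torch_available() and not is_vision_available():"))
# -> 'torch_and_vision'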
14
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
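# Example (not part of the original file): a worked probe for the position
# formula above, using the same sample collection and target as the __main__ block.
sorted_collection = [10, 30, 40, 45, 50, 66, 77, 93]
item, left, right = 67, 0, 7
point = left + ((item - sorted_collection[left]) * (right - left)) // (
    sorted_collection[right] - sorted_collection[left]
)
print(point)  # (67 - 10) * 7 // (93 - 10) = 399 // 83 = 4 -> probe index 4 (value 50)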
336
0
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = (DDIMParallelScheduler,) snake_case_ = (("eta", 0.0), ("num_inference_steps", 50)) def UpperCamelCase_ ( self : Dict ,**A : List[Any] ): __A = { "num_train_timesteps": 10_00, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**A ) return config def UpperCamelCase_ ( self : Union[str, Any] ,**A : Union[str, Any] ): __A = self.scheduler_classes[0] __A = self.get_scheduler_config(**A ) __A = scheduler_class(**A ) __A , __A = 10, 0.0 __A = self.dummy_model() __A = self.dummy_sample_deter scheduler.set_timesteps(A ) for t in scheduler.timesteps: __A = model(A ,A ) __A = scheduler.step(A ,A ,A ,A ).prev_sample return sample def UpperCamelCase_ ( self : Dict ): for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=A ) def UpperCamelCase_ ( self : Optional[int] ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=A ) __A = self.scheduler_classes[0] __A = self.get_scheduler_config(steps_offset=1 ) __A = scheduler_class(**A ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps ,torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def UpperCamelCase_ ( self : Dict ): for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A ,beta_end=A ) def UpperCamelCase_ ( self : Any ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A ) def UpperCamelCase_ ( self : Optional[int] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A ) def UpperCamelCase_ ( self : Optional[Any] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=A ) def UpperCamelCase_ ( self : Any ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=A ) def UpperCamelCase_ ( self : Any ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=A ) def UpperCamelCase_ ( self : List[str] ): self.check_over_configs(thresholding=A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=A ,prediction_type=A ,sample_max_value=A ,) def UpperCamelCase_ ( self : Any ): for t in [1, 10, 49]: self.check_over_forward(time_step=A ) def UpperCamelCase_ ( self : List[Any] ): for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 5_00] ): self.check_over_forward(time_step=A ,num_inference_steps=A ) def UpperCamelCase_ ( self : Dict ): for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ): self.check_over_forward(time_step=A ,eta=A ) def UpperCamelCase_ ( self : Optional[int] ): __A = self.scheduler_classes[0] __A = self.get_scheduler_config() __A = scheduler_class(**A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 ,4_00 ) - 0.1_47_71 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 ,9_60 ) - 0.3_24_60 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ,4_86 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ,9_98 ) - 0.02 ) ) < 1E-5 def UpperCamelCase_ ( self : List[Any] ): __A = self.scheduler_classes[0] __A = self.get_scheduler_config() 
__A = scheduler_class(**A ) __A , __A = 10, 0.0 scheduler.set_timesteps(A ) __A = self.dummy_model() __A = self.dummy_sample_deter __A = self.dummy_sample_deter + 0.1 __A = self.dummy_sample_deter - 0.1 __A = samplea.shape[0] __A = torch.stack([samplea, samplea, samplea] ,dim=0 ) __A = torch.arange(A )[0:3, None].repeat(1 ,A ) __A = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) ) __A = scheduler.batch_step_no_noise(A ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,A ) __A = torch.sum(torch.abs(A ) ) __A = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2 assert abs(result_mean.item() - 0.49_82 ) < 1E-3 def UpperCamelCase_ ( self : List[Any] ): __A = self.full_loop() __A = torch.sum(torch.abs(A ) ) __A = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3 def UpperCamelCase_ ( self : Tuple ): __A = self.full_loop(prediction_type="v_prediction" ) __A = torch.sum(torch.abs(A ) ) __A = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 52.53_02 ) < 1E-2 assert abs(result_mean.item() - 0.06_84 ) < 1E-3 def UpperCamelCase_ ( self : Union[str, Any] ): # We specify different beta, so that the first alpha is 0.99 __A = self.full_loop(set_alpha_to_one=A ,beta_start=0.01 ) __A = torch.sum(torch.abs(A ) ) __A = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2 assert abs(result_mean.item() - 0.19_51 ) < 1E-3 def UpperCamelCase_ ( self : Any ): # We specify different beta, so that the first alpha is 0.99 __A = self.full_loop(set_alpha_to_one=A ,beta_start=0.01 ) __A = torch.sum(torch.abs(A ) ) __A = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2 assert abs(result_mean.item() - 0.19_41 ) < 1E-3
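# Example (not part of the original test file): a condensed sketch of the
# full_loop pattern the tests exercise, with a toy stand-in for the denoising
# model. Shapes and the residual rule are mine; the scheduler API is diffusers'.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # toy stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])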
15
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any: UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else '''''' UpperCAmelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''), (f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''), (f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''), (f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any: for i in range(config.num_hidden_layers ): UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else '''''' # queries, keys and values UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) UpperCAmelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase : str = q_bias UpperCAmelCase : List[str] = in_proj_weight[ 
config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase : int = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) UpperCAmelCase : str = gamma_a UpperCAmelCase : Dict = gamma_a def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]: UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase ) UpperCAmelCase : str = val def a__ ( ) -> Optional[int]: UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]: UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCAmelCase : List[Any] = 1_024 UpperCAmelCase : Optional[Any] = 4_096 UpperCAmelCase : Any = 24 UpperCAmelCase : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: UpperCAmelCase : Optional[Any] = 16 UpperCAmelCase : List[Any] = '''huggingface/label-files''' UpperCAmelCase : Any = '''rvlcdip-id2label.json''' UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : Union[str, Any] = idalabel UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model'''] UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase ) for src, dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase ) # load HuggingFace model UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase ) model.eval() model.load_state_dict(UpperCAmelCase ) # Check outputs on an image UpperCAmelCase : Dict = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase ) UpperCAmelCase : List[str] = prepare_img() UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' ) UpperCAmelCase : str = encoding['''pixel_values'''] UpperCAmelCase : Any = model(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = outputs.logits # verify logits UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192] assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected" Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if 
push_to_hub: if has_lm_head: UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , ) if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
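# Example (not part of the original script): the conversion hinges on the
# pop-and-reassign rename helper defined earlier in the script. A tiny
# standalone demo of that mechanic on a toy state dict:
def rename_key_demo(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


state_dict = {"blocks.0.norm1.weight": 1.0}
rename_key_demo(state_dict, "blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")
print(state_dict)  # {'beit.encoder.layer.0.layernorm_before.weight': 1.0}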
336
0
"""simple docstring""" from graphs.minimum_spanning_tree_kruskal import kruskal def __UpperCAmelCase ( ) -> Dict: lowercase__ : str = 9 lowercase__ : Optional[int] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] lowercase__ : int = kruskal(__lowerCamelCase , __lowerCamelCase ) lowercase__ : str = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(__lowerCamelCase ) == sorted(__lowerCamelCase )
16
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __UpperCAmelCase ( unittest.TestCase ): def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ): UpperCAmelCase : Any = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : Any = seq_length UpperCAmelCase : Tuple = is_training UpperCAmelCase : str = use_attention_mask UpperCAmelCase : List[str] = use_token_type_ids UpperCAmelCase : int = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : str = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Optional[Any] = initializer_range UpperCAmelCase : Any = num_choices def __magic_name__ ( self : str ): UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : List[Any] = None if self.use_attention_mask: UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Any = None if self.use_token_type_ids: UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) UpperCAmelCase : Union[str, Any] = RobertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def __magic_name__ ( self : int ): UpperCAmelCase : Any = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs UpperCAmelCase : Any = True UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 
self.seq_length, self.hidden_size] ) UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Dict = FlaxRobertaModelTester(self ) @slow def __magic_name__ ( self : Any ): for model_class_name in self.all_model_classes: UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A ) UpperCAmelCase : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
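# Example (not part of the original test file): the call pattern the slow test
# above exercises, here using the hub's native Flax weights.
from transformers import AutoTokenizer, FlaxRobertaModel

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = FlaxRobertaModel.from_pretrained("roberta-base")

inputs = tokenizer("Hello, Flax!", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)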
336
0
"""simple docstring""" def _A ( UpperCamelCase_ : int, UpperCamelCase_ : int) -> str: '''simple docstring''' if not isinstance(UpperCamelCase_, UpperCamelCase_): raise ValueError("iterations must be defined as integers") if not isinstance(UpperCamelCase_, UpperCamelCase_) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0") if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz") __lowercase = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(UpperCamelCase_) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
17
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Dict = {"vocab_file": "vocab.txt"} _lowerCamelCase : List[str] = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } _lowerCamelCase : List[Any] = { "facebook/esm2_t6_8M_UR50D": 1_0_2_4, "facebook/esm2_t12_35M_UR50D": 1_0_2_4, } def a__ ( UpperCAmelCase : List[str] ) -> Any: with open(UpperCAmelCase , '''r''' ) as f: UpperCAmelCase : Dict = f.read().splitlines() return [l.strip() for l in lines] class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ): super().__init__(**__A ) UpperCAmelCase : Tuple = load_vocab_file(__A ) UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) ) UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )} UpperCAmelCase : Union[str, Any] = unk_token UpperCAmelCase : Optional[Any] = cls_token UpperCAmelCase : Optional[int] = pad_token UpperCAmelCase : Optional[int] = mask_token UpperCAmelCase : List[str] = eos_token UpperCAmelCase : Optional[Any] = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __magic_name__ ( self : Tuple, __A : int ): return self._id_to_token.get(__A, self.unk_token ) def __magic_name__ ( self : List[Any], __A : str ): return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) ) def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ): return text.split() def __magic_name__ ( self : Optional[int], __A : Dict=False ): return len(self._id_to_token ) def __magic_name__ ( self : int ): return {token: i for i, token in enumerate(self.all_tokens )} def __magic_name__ ( self : Tuple, __A : str ): return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) ) def __magic_name__ ( self : Any, __A : int ): return self._id_to_token.get(__A, self.unk_token ) def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ): UpperCAmelCase : Optional[int] = [self.cls_token_id] UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1] if token_ids_a is 
not None: mask += [0] * len(__A ) + [1] return mask def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ): UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(__A, '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __magic_name__ ( self : Dict ): return self.get_vocab_size(with_added_tokens=__A ) def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ): return super()._add_tokens(__A, special_tokens=__A )
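# Example (not part of the original file): a usage sketch, assuming this file
# corresponds to the upstream EsmTokenizer. Residues tokenize one per token,
# and single sequences get <cls> ... <eos> added.
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
ids = tokenizer("M K T F F V")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
# ['<cls>', 'M', 'K', 'T', 'F', 'F', 'V', '<eos>']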
336
0
from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Any = logging.get_logger(__name__) __lowerCamelCase : Tuple = { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json''' ), } class a__ ( A__ ): A = 'xlm-prophetnet' A = ['past_key_values'] A = { 'num_attention_heads': 'num_encoder_attention_heads', } def __init__( self : Optional[Any],_A : Optional[float] = 0.1,_A : Optional[Union[str, Callable]] = "gelu",_A : Optional[int] = 3_0522,_A : Optional[int] = 1024,_A : Optional[int] = 4096,_A : Optional[int] = 12,_A : Optional[int] = 16,_A : Optional[int] = 4096,_A : Optional[int] = 12,_A : Optional[int] = 16,_A : Optional[float] = 0.1,_A : Optional[float] = 0.1,_A : Optional[int] = 512,_A : Optional[float] = 0.02,_A : Optional[bool] = True,_A : Optional[bool] = True,_A : Optional[int] = 0,_A : Optional[int] = 2,_A : Optional[int] = 32,_A : Optional[int] = 128,_A : Optional[bool] = False,_A : Optional[float] = 0.0,_A : Optional[bool] = True,_A : Optional[int] = 0,_A : Optional[int] = 1,_A : Optional[int] = 2,**_A : str,): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = vocab_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_size SCREAMING_SNAKE_CASE_ : str = encoder_ffn_dim SCREAMING_SNAKE_CASE_ : Any = num_encoder_layers SCREAMING_SNAKE_CASE_ : Any = num_encoder_attention_heads SCREAMING_SNAKE_CASE_ : int = decoder_ffn_dim SCREAMING_SNAKE_CASE_ : Optional[int] = num_decoder_layers SCREAMING_SNAKE_CASE_ : str = num_decoder_attention_heads SCREAMING_SNAKE_CASE_ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Optional[int] = init_std # Normal(0, this parameter) SCREAMING_SNAKE_CASE_ : Dict = activation_function # parameters for xlmprophetnet SCREAMING_SNAKE_CASE_ : int = ngram SCREAMING_SNAKE_CASE_ : Optional[int] = num_buckets SCREAMING_SNAKE_CASE_ : Optional[int] = relative_max_distance SCREAMING_SNAKE_CASE_ : List[Any] = disable_ngram_loss SCREAMING_SNAKE_CASE_ : List[str] = eps # 3 Types of Dropout SCREAMING_SNAKE_CASE_ : str = attention_dropout SCREAMING_SNAKE_CASE_ : Tuple = activation_dropout SCREAMING_SNAKE_CASE_ : List[Any] = dropout SCREAMING_SNAKE_CASE_ : Any = use_cache super().__init__( pad_token_id=_A,bos_token_id=_A,eos_token_id=_A,is_encoder_decoder=_A,add_cross_attention=_A,decoder_start_token_id=_A,**_A,) @property def __UpperCamelCase ( self : Tuple ): """simple docstring""" return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def __UpperCamelCase ( self : Optional[int],_A : str ): """simple docstring""" raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." )
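# Example (not part of the original file): instantiating the config above and
# reading the derived layer count; the num_hidden_layers property sums encoder
# and decoder layers, and its setter deliberately raises.
from transformers import XLMProphetNetConfig

config = XLMProphetNetConfig()
print(config.num_encoder_layers, config.num_decoder_layers)  # 12 12
print(config.num_hidden_layers)  # 24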
18
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) ) self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) ) class __UpperCAmelCase : def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ): UpperCAmelCase : Optional[int] = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : List[str] = num_channels UpperCAmelCase : str = image_size UpperCAmelCase : Optional[int] = depth_multiplier UpperCAmelCase : Union[str, Any] = depth_divisible_by UpperCAmelCase : Optional[Any] = min_depth UpperCAmelCase : List[str] = expand_ratio UpperCAmelCase : Dict = tf_padding UpperCAmelCase : str = output_stride UpperCAmelCase : Union[str, Any] = first_layer_is_expansion UpperCAmelCase : List[Any] = finegrained_output UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) UpperCAmelCase : Optional[Any] = classifier_dropout_prob UpperCAmelCase : Dict = use_labels UpperCAmelCase : List[str] = is_training UpperCAmelCase : Tuple = num_labels UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : Any = scope def __magic_name__ ( self : List[Any] ): UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Dict = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels ) UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__ ( self : Any ): return MobileNetVaConfig( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def __magic_name__ ( self : List[Any], __A : Dict, __A : 
Optional[Any], __A : Optional[int], __A : Union[str, Any] ): UpperCAmelCase : Any = MobileNetVaModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Optional[Any] = model(__A ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) self.parent.assertEqual( result.pooler_output.shape, (self.batch_size, self.last_hidden_size), ) def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ): UpperCAmelCase : Optional[int] = self.num_labels UpperCAmelCase : Any = MobileNetVaForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Optional[int] = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ): UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCAmelCase : Optional[Any] = model(__A, labels=__A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) UpperCamelCase = ( { """feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification, """image-segmentation""": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = MobileNetVaModelTester(self ) UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A ) def __magic_name__ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def __magic_name__ ( self : Tuple ): pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def __magic_name__ ( self : Any ): pass def __magic_name__ ( self : Optional[int] ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(__A ) UpperCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def 
__magic_name__ ( self : List[Any] ): UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : int ): def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ): UpperCAmelCase : Union[str, Any] = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Optional[Any] = outputs.hidden_states UpperCAmelCase : List[Any] = 1_6 self.assertEqual(len(__A ), __A ) UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Tuple = True check_hidden_states_output(__A, __A, __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Tuple = True check_hidden_states_output(__A, __A, __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : int ): UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__A ) @slow def __magic_name__ ( self : Dict ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> int: UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[Any] ): return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A ) UpperCAmelCase : Optional[int] = self.default_image_processor UpperCAmelCase : Optional[Any] = prepare_img() UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : str = model(**__A ) # verify the logits UpperCAmelCase : int = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) UpperCAmelCase : List[Any] = model.to(__A ) UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) UpperCAmelCase : List[Any] = prepare_img() UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Union[str, Any] = model(**__A ) UpperCAmelCase : Optional[Any] = outputs.logits # verify the logits UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) ) self.assertEqual(logits.shape, __A ) UpperCAmelCase : Tuple = torch.tensor( [ [[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]], [[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, 
-2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]], [[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]], ], device=__A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
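# A minimal inference sketch for the model family exercised by the tests above,
# assuming the obfuscated MobileNetVa* names correspond to transformers' MobileNetV2
# classes and that the "google/mobilenet_v2_1.0_224" checkpoint used in the slow
# test is reachable.
if __name__ == "__main__":
    from PIL import Image
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

    processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])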
336
0
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
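# Usage sketch (assumes qiskit with the Aer simulator is installed): the returned
# counts map bitstrings to frequencies; only qubit 0 is measured, and a freshly
# initialised qubit reads out as 0.
if __name__ == "__main__":
    print(single_qubit_measure(2, 2))  # e.g. {'00': 1000}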
19
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """codegen""" UpperCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ): UpperCAmelCase : int = vocab_size UpperCAmelCase : Tuple = n_ctx UpperCAmelCase : Tuple = n_positions UpperCAmelCase : Optional[int] = n_embd UpperCAmelCase : Union[str, Any] = n_layer UpperCAmelCase : List[str] = n_head UpperCAmelCase : Tuple = n_inner UpperCAmelCase : int = rotary_dim UpperCAmelCase : List[Any] = activation_function UpperCAmelCase : List[str] = resid_pdrop UpperCAmelCase : Optional[Any] = embd_pdrop UpperCAmelCase : str = attn_pdrop UpperCAmelCase : Tuple = layer_norm_epsilon UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Union[str, Any] = use_cache UpperCAmelCase : Any = bos_token_id UpperCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A ) class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ): super().__init__(__A, task=__A, patching_specs=__A, use_past=__A ) if not getattr(self._config, '''pad_token_id''', __A ): # TODO: how to do that better? 
UpperCAmelCase : Union[str, Any] = 0 @property def __magic_name__ ( self : str ): UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__A, direction='''inputs''' ) UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __magic_name__ ( self : Dict ): return self._config.n_layer @property def __magic_name__ ( self : List[str] ): return self._config.n_head def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ): UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs( __A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A ) # We need to order the input in the way they appears in the forward() UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase : str = seqlen + 2 UpperCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase : Optional[int] = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask'''] if self.use_past: UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype UpperCAmelCase : Dict = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 ) return ordered_inputs @property def __magic_name__ ( self : Tuple ): return 1_3
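# A small usage sketch, assuming the config class above corresponds to transformers'
# CodeGenConfig: the attribute_map lets the canonical HF names resolve to the
# GPT-style fields.
if __name__ == "__main__":
    from transformers import CodeGenConfig

    config = CodeGenConfig(n_embd=1024, n_layer=12)
    assert config.hidden_size == config.n_embd == 1024
    assert config.num_hidden_layers == config.n_layer == 12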
336
0
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=() , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="no" , SCREAMING_SNAKE_CASE__="29500" ) -> Union[str, Any]: lowercase : str = False lowercase : List[Any] = False if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ): lowercase : Tuple = True elif "IPython" in sys.modules: lowercase : Dict = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() ) try: lowercase : List[str] = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , SCREAMING_SNAKE_CASE__ ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """ """your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if num_processes is None: lowercase : List[Any] = 8 lowercase : List[str] = PrepareForLaunch(SCREAMING_SNAKE_CASE__ , distributed_type="""TPU""" ) print(f"Launching a training on {num_processes} TPU cores." ) xmp.spawn(SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , nprocs=SCREAMING_SNAKE_CASE__ , start_method="""fork""" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on one CPU.""" ) function(*SCREAMING_SNAKE_CASE__ ) else: if num_processes is None: raise ValueError( """You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """ """inside your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if torch.cuda.is_initialized(): raise ValueError( """To launch a multi-GPU training from your notebook, you need to avoid running any instruction """ """using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """ """function.""" ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=SCREAMING_SNAKE_CASE__ , master_addr="""127.0.01""" , master_port=SCREAMING_SNAKE_CASE__ , mixed_precision=SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = PrepareForLaunch(SCREAMING_SNAKE_CASE__ , distributed_type="""MULTI_GPU""" ) print(f"Launching training on {num_processes} GPUs." 
) try: start_processes(SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , nprocs=SCREAMING_SNAKE_CASE__ , start_method="""fork""" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( """CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """ """This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """ """Please review your imports and test them when running the `notebook_launcher()` to identify """ """which one is problematic.""" ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): lowercase : Union[str, Any] = """1""" print("""Launching training on MPS.""" ) elif torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on CPU.""" ) function(*SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=() , SCREAMING_SNAKE_CASE__=2 ) -> List[Any]: from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=SCREAMING_SNAKE_CASE__ , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ): lowercase : Tuple = PrepareForLaunch(SCREAMING_SNAKE_CASE__ , debug=SCREAMING_SNAKE_CASE__ ) start_processes(SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , nprocs=SCREAMING_SNAKE_CASE__ , start_method="""fork""" )
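# Usage sketch, assuming this module is accelerate's notebook launcher: inside a
# notebook, wrap the training function and let the launcher pick TPU/GPU/CPU.
if __name__ == "__main__":
    from accelerate import notebook_launcher

    def training_function():
        print("hello from the launched process")

    notebook_launcher(training_function, args=(), num_processes=1)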
20
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. "
    "Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
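# The replacement import path named in the deprecation message:
#
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput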
336
0
import functools


def minimum_cost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
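# Worked example (LeetCode 983 "Minimum Cost For Tickets"): travelling on the days
# below with 1/7/30-day passes costing 2/7/15 has a best total of 11: a 7-day pass
# covering days 4-8, plus 1-day passes on days 1 and 20.
if __name__ == "__main__":
    assert minimum_cost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11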
21
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class __UpperCAmelCase : # setable values UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None # sigma(t_i) @classmethod def __magic_name__ ( cls : Any ): return cls() @dataclass class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): @property def __magic_name__ ( self : Optional[int] ): return True @register_to_config def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ): pass def __magic_name__ ( self : Optional[Any] ): return KarrasVeSchedulerState.create() def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ): UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy() UpperCAmelCase : Union[str, Any] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, ) def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ): if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 ) else: UpperCAmelCase : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 ) UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape ) UpperCAmelCase : Tuple = sigma + gamma * sigma UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ): UpperCAmelCase : int = sample_hat + sigma_hat * model_output UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A ) def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ): UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A ) def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ): raise NotImplementedError()
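# A standalone sketch of the (squared) Karras sigma schedule built in set_timesteps
# above, assuming jax is installed; it interpolates geometrically from sigma_max**2
# down to sigma_min**2 over the inference steps.
if __name__ == "__main__":
    import jax.numpy as jnp

    def karras_sigma_schedule(num_inference_steps: int, sigma_min: float = 0.02, sigma_max: float = 100.0):
        i = jnp.arange(num_inference_steps)
        return sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))

    print(karras_sigma_schedule(5))  # ~[1e4, 1.4e2, 2.0, 2.8e-2, 4e-4]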
336
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: __SCREAMING_SNAKE_CASE :List[Any] = None __SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __SCREAMING_SNAKE_CASE :List[Any] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } __SCREAMING_SNAKE_CASE :Optional[Any] = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } __SCREAMING_SNAKE_CASE :Optional[int] = '''▁''' class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES _lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : int = AlbertTokenizer def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=True , snake_case_ : str=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="[CLS]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : str="<unk>" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : List[Any]="<pad>" , snake_case_ : List[str]="[CLS]" , snake_case_ : int="[MASK]" , **snake_case_ : Any , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_UpperCAmelCase = ( AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token ) super().__init__( snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , ) _UpperCAmelCase = do_lower_case _UpperCAmelCase = remove_space _UpperCAmelCase = keep_accents _UpperCAmelCase = vocab_file _UpperCAmelCase = False if not self.vocab_file else True def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(snake_case_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase = os.path.join( snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ): copyfile(self.vocab_file , snake_case_ ) return (out_vocab_file,)
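# Usage sketch, assuming the class above is transformers' AlbertTokenizerFast:
if __name__ == "__main__":
    from transformers import AlbertTokenizerFast

    tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    encoded = tokenizer("Hello world", "a second sentence")
    # [CLS] ... [SEP] ... [SEP], as built by build_inputs_with_special_tokens above
    print(encoded["input_ids"])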
22
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
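# Usage sketch: hide the terminal cursor for the duration of a block; it is
# restored in the finally clause even if the body raises.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(1)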
336
0
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ) -> List[str]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file UpperCAmelCase : List[str] = TapasConfig.from_json_file(_lowerCAmelCase ) # set absolute/relative position embeddings parameter UpperCAmelCase : Optional[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": UpperCAmelCase : Any = TapasForQuestionAnswering(config=_lowerCAmelCase ) elif task == "WTQ": # run_task_main.py hparams UpperCAmelCase : int = 4 UpperCAmelCase : int = True # hparam_utils.py hparams UpperCAmelCase : Union[str, Any] = 0.6_6_4_6_9_4 UpperCAmelCase : Tuple = 0.2_0_7_9_5_1 UpperCAmelCase : Dict = 0.1_2_1_1_9_4 UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = True UpperCAmelCase : List[Any] = False UpperCAmelCase : Tuple = 0.0_3_5_2_5_1_3 UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=_lowerCAmelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams UpperCAmelCase : Optional[Any] = 4 UpperCAmelCase : Tuple = False # hparam_utils.py hparams UpperCAmelCase : Union[str, Any] = 3_6.4_5_1_9 UpperCAmelCase : Optional[Any] = 0.9_0_3_4_2_1 UpperCAmelCase : Dict = 2_2_2.0_8_8 UpperCAmelCase : int = True UpperCAmelCase : Tuple = True UpperCAmelCase : Tuple = True UpperCAmelCase : Any = 0.7_6_3_1_4_1 UpperCAmelCase : Tuple = TapasForQuestionAnswering(config=_lowerCAmelCase ) elif task == "TABFACT": UpperCAmelCase : List[str] = TapasForSequenceClassification(config=_lowerCAmelCase ) elif task == "MLM": UpperCAmelCase : List[str] = TapasForMaskedLM(config=_lowerCAmelCase ) elif task == "INTERMEDIATE_PRETRAINING": UpperCAmelCase : List[Any] = TapasModel(config=_lowerCAmelCase ) else: raise ValueError(f"""Task {task} not supported.""" ) print(f"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Save pytorch-model (weights and configuration) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_lowerCAmelCase ) # Save tokenizer files print(f"""Save tokenizer files to {pytorch_dump_path}""" ) UpperCAmelCase : Dict = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 ) tokenizer.save_pretrained(_lowerCAmelCase ) print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCamelCase__: Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. 
Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCamelCase__: Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
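# Example invocation (hypothetical paths and script name), matching the arguments above:
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output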
23
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
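# With the lazy-module pattern above, heavy submodules are imported only on first
# attribute access, so `import transformers` stays cheap. A quick check (assumes
# transformers and torch are installed):
#
#   from transformers import EncodecConfig  # resolved lazily via _LazyModule
#   print(EncodecConfig().model_type)       # "encodec"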
336
0
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
24
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    # Divide a number of bytes into evenly sized partitions, expressed as byte ranges.
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
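# Worked example: 100 bytes across 4 partitions; the last partition absorbs the remainder.
if __name__ == "__main__":
    assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]
    assert allocation_num(10, 3) == ["1-3", "4-6", "7-10"]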
336
0
"""simple docstring""" import os import sys import unittest UpperCAmelCase__ : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) UpperCAmelCase__ : List[str] = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') UpperCAmelCase__ : Dict = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""BertModelTest""": """BertModelTester"""} SCREAMING_SNAKE_CASE__ : Any = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = get_model_to_test_mapping(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = get_model_to_test_mapping(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } SCREAMING_SNAKE_CASE__ : Dict = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], 
"""BertModel""": ["""BertModelTester"""], } SCREAMING_SNAKE_CASE__ : int = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
25
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file _lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`." def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]: if subparsers is not None: UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description ) else: UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description ) # Core arguments UpperCAmelCase : Optional[int] = parser.add_argument_group( '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' ) config_args.add_argument( '''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , ) config_args.add_argument( '''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , ) config_args.add_argument( '''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , ) UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' ) pod_args.add_argument( '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , ) pod_args.add_argument( '''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , ) pod_args.add_argument( '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , ) pod_args.add_argument( '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , ) pod_args.add_argument( '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , ) pod_args.add_argument( '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' ) if subparsers is not None: parser.set_defaults(func=UpperCAmelCase ) return parser def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(UpperCAmelCase ): UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: UpperCAmelCase : List[Any] = defaults.command_file if not args.command and defaults.commands is not None: UpperCAmelCase : List[str] = defaults.commands if not args.tpu_name: UpperCAmelCase : Tuple = defaults.tpu_name if not args.tpu_zone: UpperCAmelCase : int = defaults.tpu_zone if args.accelerate_version == "dev": UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git''' elif args.accelerate_version == "latest": UpperCAmelCase : Dict = '''accelerate -U''' elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ): UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}''' if not args.command_file and not args.command: raise ValueError('''You must specify either a command file or a command to run on the pod.''' ) if args.command_file: with open(args.command_file , '''r''' ) as f: UpperCAmelCase : int = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , UpperCAmelCase ): UpperCAmelCase : int = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate UpperCAmelCase : Optional[int] = ['''cd /usr/share'''] if args.install_accelerate: new_cmd += [f'''pip install {args.accelerate_version}'''] new_cmd += args.command UpperCAmelCase : int = '''; '''.join(UpperCAmelCase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess UpperCAmelCase : Any = ['''gcloud'''] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f'''Running {" ".join(UpperCAmelCase )}''' ) return subprocess.run(UpperCAmelCase ) print('''Successfully setup pod.''' ) def a__ ( ) -> Any: UpperCAmelCase : Any = tpu_command_parser() UpperCAmelCase : Tuple = parser.parse_args() tpu_command_launcher(UpperCAmelCase )
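# Example invocation (hypothetical TPU name and zone), matching the arguments above:
#
#   accelerate tpu-config \
#       --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello from the pod" \
#       --install_accelerate --debug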
336
0
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
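# Example invocation (hypothetical file names), matching the arguments above:
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set nq_dev.questions \
#       --gold_data_path nq_dev.gold_data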
26
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[int] = logging.get_logger(__name__) def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: print('''Loading config file...''' ) def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ): UpperCAmelCase : List[str] = [] for k, v in d.items(): UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k if isinstance(UpperCAmelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() ) else: items.append((new_key, v) ) return dict(UpperCAmelCase ) UpperCAmelCase : List[str] = argparse.Namespace() with open(UpperCAmelCase , '''r''' ) as yaml_file: try: UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader ) UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase ) for k, v in flat_cfg.items(): setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) ) return config def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]: UpperCAmelCase : int = MobileViTVaConfig() UpperCAmelCase : str = False # dataset if task_name.startswith('''imagenet1k_''' ): UpperCAmelCase : Any = 1_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase : Any = 384 else: UpperCAmelCase : Tuple = 256 UpperCAmelCase : int = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): UpperCAmelCase : Optional[Any] = 21_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase : str = 384 else: UpperCAmelCase : Dict = 256 UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): UpperCAmelCase : Optional[Any] = 151 UpperCAmelCase : Tuple = 512 UpperCAmelCase : Tuple = '''ade20k-id2label.json''' UpperCAmelCase : Tuple = True elif task_name.startswith('''voc_''' ): UpperCAmelCase : Dict = 21 UpperCAmelCase : str = 512 UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json''' UpperCAmelCase : Dict = True # orig_config UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase ) assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) UpperCAmelCase : Any = getattr(UpperCAmelCase , 
'''model.segmentation.deeplabv3.aspp_out_channels''' , 512 ) UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label UpperCAmelCase : Union[str, Any] = '''huggingface/label-files''' UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : int = idalabel UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} return config def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]: UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase ) UpperCAmelCase : List[str] = val def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]: if base_model: UpperCAmelCase : Dict = '''''' else: UpperCAmelCase : Dict = '''mobilevitv2.''' UpperCAmelCase : Optional[int] = [] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCAmelCase : List[str] = k[8:] else: UpperCAmelCase : Dict = k if ".block." in k: UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: UpperCAmelCase : Dict = [0, 1] elif i == 4: UpperCAmelCase : Dict = [0, 1, 2, 3] elif i == 5: UpperCAmelCase : int = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: UpperCAmelCase : Optional[Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: UpperCAmelCase : Any = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." 
in k: UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." in k: UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any: UpperCAmelCase : str = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(UpperCAmelCase ) for k in keys_to_ignore: state_dict.pop(UpperCAmelCase , UpperCAmelCase ) def a__ ( ) -> Union[str, Any]: UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase ) # load original state_dict UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval() UpperCAmelCase : str = False else: UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval() UpperCAmelCase : Any = False # remove and rename some keys of load the original model UpperCAmelCase : Optional[Any] = checkpoint remove_unused_keys(UpperCAmelCase ) UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # load modified state_dict model.load_state_dict(UpperCAmelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase ) # verify classification model if task_name.startswith('''imagenet''' ): UpperCAmelCase : Optional[Any] = outputs.logits UpperCAmelCase : int = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 ) Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": _lowerCamelCase : str = 
argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
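The conversion script above does all of its work through state-dict surgery: create_rename_keys builds (old, new) key pairs and the rename_key helper pops each tensor and re-inserts it under the new name. A minimal self-contained sketch of that pattern follows; the toy tensors are made up, and the two mappings mirror the conv_1. and layer_1. rules visible in the script (with base_model=False, so the prefix is "mobilevitv2."):

import torch

def rename_key(state_dict: dict, old: str, new: str) -> None:
    # Pop the tensor stored under the old checkpoint name and re-insert it
    # under the name the HuggingFace model expects.
    state_dict[new] = state_dict.pop(old)

# Toy stand-in for an original checkpoint (shapes and names are illustrative only).
state_dict = {"conv_1.weight": torch.zeros(16, 3, 3, 3), "layer_1.0.weight": torch.zeros(16)}
for old, new in [
    ("conv_1.weight", "mobilevitv2.conv_stem.weight"),
    ("layer_1.0.weight", "mobilevitv2.encoder.layer.0.layer.0.weight"),
]:
    rename_key(state_dict, old, new)
print(sorted(state_dict))  # renamed keys, ready for model.load_state_dict(...)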
336
0
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase : def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ): '''simple docstring''' __a : Optional[Any] = parent __a : int = batch_size __a : Any = num_channels __a : Optional[int] = image_size __a : Dict = patch_size __a : int = is_training __a : Union[str, Any] = use_input_mask __a : Optional[int] = use_token_type_ids __a : Dict = use_labels __a : str = vocab_size __a : List[Any] = hidden_size __a : Union[str, Any] = num_hidden_layers __a : str = num_attention_heads __a : Union[str, Any] = intermediate_size __a : Any = hidden_act __a : List[str] = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : List[Any] = max_position_embeddings __a : Tuple = type_vocab_size __a : Any = type_sequence_label_size __a : Optional[int] = initializer_range __a : Any = coordinate_size __a : List[Any] = shape_size __a : Optional[int] = num_labels __a : Dict = num_choices __a : Union[str, Any] = scope __a : Union[str, Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __a : Optional[int] = text_seq_length __a : Any = (image_size // patch_size) ** 2 + 1 __a : Dict = self.text_seq_length + self.image_seq_length def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __a : Any = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __a : List[Any] = bbox[i, j, 3] __a : Tuple = bbox[i, j, 1] __a : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __a : int = bbox[i, j, 2] __a : Dict = bbox[i, j, 0] __a : int = tmp_coordinate __a : Optional[int] = tf.constant(__a ) __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : str = None if self.use_input_mask: __a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) __a : str = None if self.use_token_type_ids: __a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __a : Optional[Any] = None __a 
: Optional[int] = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __a : int = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = TFLayoutLMvaModel(config=__a ) # text + image __a : List[Any] = model(__a , pixel_values=__a , training=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , ) __a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __a : Any = model(__a , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __a : str = model({'pixel_values': pixel_values} , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = TFLayoutLMvaForSequenceClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : str = self.num_labels __a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[Any] = 2 __a : Any = TFLayoutLMvaForQuestionAnswering(config=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.prepare_config_and_inputs() ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs __a : Any = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf 
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) A_ = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) A_ = False A_ = False A_ = False def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ): '''simple docstring''' return True def __UpperCAmelCase ( self , __a , __a , __a=False ): '''simple docstring''' __a : str = copy.deepcopy(__a ) if model_class in get_values(__a ): __a : str = { k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__a , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__a ): __a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Union[str, Any] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = TFLayoutLMvaModelTester(self ) __a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) if getattr(__a , 'hf_compute_loss' , __a ): # The number of elements in the loss should be the same as the number of elements in the label __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0] ] __a : Dict = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : Dict = prepared_for_class.pop('input_ids' ) __a : Tuple = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: __a : Union[str, Any] = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __a : List[Any] = -100 __a : List[str] = tf.convert_to_tensor(__a ) __a : Any = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = model(__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model 
correctly compute the loss with a tuple __a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) # Get keys that were added with the _prepare_for_class function __a : Dict = prepared_for_class.keys() - inputs_dict.keys() __a : Any = inspect.signature(model.call ).parameters __a : str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __a : List[Any] = {0: 'input_ids'} for label_key in label_keys: __a : List[Any] = signature_names.index(__a ) __a : Union[str, Any] = label_key __a : List[str] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __a : Union[str, Any] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __a : Optional[Any] = prepared_for_class[value] __a : str = tuple(__a ) # Send to model __a : Tuple = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a : Any = type self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __a , __a , __a , __a , __a , __a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) __a : Tuple = self.default_image_processor __a : List[Any] = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values __a : Union[str, Any] = tf.constant([[1, 2]] ) __a : Optional[Any] = 
tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a ) # verify the logits __a : List[Any] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , __a ) __a : Optional[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
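The (1, 199, 768) hidden-state shape asserted at the end follows directly from how the tester above computes sequence length: text tokens plus image patches plus one CLS token. A quick sanity check with the integration test's inputs (224x224 input and 16x16 patches are assumed defaults of microsoft/layoutlmv3-base):

image_size, patch_size = 224, 16          # assumed layoutlmv3-base defaults
text_tokens = 2                           # input_ids = [[1, 2]] in the test
image_seq_length = (image_size // patch_size) ** 2 + 1   # 196 patches + CLS = 197
seq_length = text_tokens + image_seq_length              # 2 + 197 = 199
assert seq_length == 199                  # matches the (1, 199, 768) assertion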
27
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __UpperCAmelCase ( lowerCamelCase__ ): def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) UpperCAmelCase : str = '''__cached_''' + self.fget.__name__ UpperCAmelCase : int = getattr(__A, __A, __A ) if cached is None: UpperCAmelCase : Any = self.fget(__A ) setattr(__A, __A, __A ) return cached def a__ ( UpperCAmelCase : Optional[Any] ) -> Any: UpperCAmelCase : Any = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_torch_fx_proxy(UpperCAmelCase ): return True if is_torch_available(): import torch if isinstance(UpperCAmelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCAmelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]: return isinstance(UpperCAmelCase , np.ndarray ) def a__ ( UpperCAmelCase : str ) -> Tuple: return _is_numpy(UpperCAmelCase ) def a__ ( UpperCAmelCase : str ) -> List[Any]: import torch return isinstance(UpperCAmelCase , torch.Tensor ) def a__ ( UpperCAmelCase : str ) -> List[Any]: return False if not is_torch_available() else _is_torch(UpperCAmelCase ) def a__ ( UpperCAmelCase : Tuple ) -> List[str]: import torch return isinstance(UpperCAmelCase , torch.device ) def a__ ( UpperCAmelCase : Any ) -> Any: return False if not is_torch_available() else _is_torch_device(UpperCAmelCase ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: import torch if isinstance(UpperCAmelCase , UpperCAmelCase ): if hasattr(UpperCAmelCase , UpperCAmelCase ): UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase ) else: return False return isinstance(UpperCAmelCase , torch.dtype ) def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase ) def a__ ( UpperCAmelCase : Any ) -> str: import tensorflow as tf return isinstance(UpperCAmelCase , tf.Tensor ) def a__ ( UpperCAmelCase : int ) -> Union[str, Any]: return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[str] ) -> Tuple: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(UpperCAmelCase ) return type(UpperCAmelCase ) == tf.Tensor def a__ ( UpperCAmelCase : int ) -> List[Any]: return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase ) def a__ ( UpperCAmelCase : List[Any] ) -> Dict: import jax.numpy as jnp # noqa: F811 return isinstance(UpperCAmelCase , jnp.ndarray ) 
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]: return False if not is_flax_available() else _is_jax(UpperCAmelCase ) def a__ ( UpperCAmelCase : int ) -> Tuple: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return [to_py_obj(UpperCAmelCase ) for o in obj] elif is_tf_tensor(UpperCAmelCase ): return obj.numpy().tolist() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ).tolist() elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( UpperCAmelCase : Any ) -> List[str]: if isinstance(UpperCAmelCase , (dict, UserDict) ): return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()} elif isinstance(UpperCAmelCase , (list, tuple) ): return np.array(UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): return obj.numpy() elif is_torch_tensor(UpperCAmelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCAmelCase ): return np.asarray(UpperCAmelCase ) else: return obj class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Optional[Any] = fields(self ) # Safety and consistency checks if not len(__A ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) UpperCAmelCase : int = getattr(self, class_fields[0].name ) UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__A ): if isinstance(__A, __A ): UpperCAmelCase : Tuple = first_field.items() UpperCAmelCase : Any = True else: try: UpperCAmelCase : Optional[Any] = iter(__A ) UpperCAmelCase : Optional[Any] = True except TypeError: UpperCAmelCase : Optional[int] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__A ): if ( not isinstance(__A, (list, tuple) ) or not len(__A ) == 2 or not isinstance(element[0], __A ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self, element[0], element[1] ) if element[1] is not None: UpperCAmelCase : Union[str, Any] = element[1] elif first_field is not None: UpperCAmelCase : Union[str, Any] = first_field else: for field in class_fields: UpperCAmelCase : Optional[Any] = getattr(self, field.name ) if v is not None: UpperCAmelCase : Optional[int] = v def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ): raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ): raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : Any, *__A : Dict, **__A : str ): raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ): raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self : List[str], __A : List[str] ): if isinstance(__A, __A ): UpperCAmelCase : int = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__A, __A ) super().__setattr__(__A, __A ) def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ): # Will raise a KeyException if needed super().__setitem__(__A, __A ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__A, __A ) def __magic_name__ ( self : List[str] ): return tuple(self[k] for k in self.keys() ) class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): @classmethod def __magic_name__ ( cls : List[Any], __A : Tuple ): raise ValueError( F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """longest""" UpperCamelCase = """max_length""" UpperCamelCase = """do_not_pad""" class __UpperCAmelCase ( lowerCamelCase__ ): UpperCamelCase = """pt""" UpperCamelCase = """tf""" UpperCamelCase = """np""" UpperCamelCase = """jax""" class __UpperCAmelCase : def __init__( self : Any, __A : List[ContextManager] ): UpperCAmelCase : Tuple = context_managers UpperCAmelCase : Tuple = ExitStack() def __enter__( self : Any ): for context_manager in self.context_managers: self.stack.enter_context(__A ) def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ): self.stack.__exit__(*__A, **__A ) def a__ ( UpperCAmelCase : Union[str, Any] ) -> str: UpperCAmelCase : int = infer_framework(UpperCAmelCase ) if framework == "tf": UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ ( UpperCAmelCase : Dict ) -> Any: UpperCAmelCase : List[Any] = model_class.__name__ UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase ) if framework == "tf": UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase : Dict = 
inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]: def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ): for k, v in d.items(): UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k if v and isinstance(UpperCAmelCase , UpperCAmelCase ): yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items() else: yield key, v return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) @contextmanager def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]: if is_numpy_array(UpperCAmelCase ): return np.transpose(UpperCAmelCase , axes=UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.T if axes is None else array.permute(*UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]: if is_numpy_array(UpperCAmelCase ): return np.reshape(UpperCAmelCase , UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.reshape(*UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.reshape(UpperCAmelCase , UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.reshape(UpperCAmelCase , UpperCAmelCase ) else: raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any: if is_numpy_array(UpperCAmelCase ): return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str: if is_numpy_array(UpperCAmelCase ): return np.expand_dims(UpperCAmelCase , UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.unsqueeze(dim=UpperCAmelCase ) elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase ) else: raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : Dict ) -> List[str]: if is_numpy_array(UpperCAmelCase ): return 
np.size(UpperCAmelCase ) elif is_torch_tensor(UpperCAmelCase ): return array.numel() elif is_tf_tensor(UpperCAmelCase ): import tensorflow as tf return tf.size(UpperCAmelCase ) elif is_jax_tensor(UpperCAmelCase ): return array.size else: raise ValueError(f'''Type not supported for tensor_size: {type(UpperCAmelCase )}.''' ) def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict: for key, value in auto_map.items(): if isinstance(UpperCAmelCase , (tuple, list) ): UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase : List[Any] = f'''{repo_id}--{value}''' return auto_map def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]: for base_class in inspect.getmro(UpperCAmelCase ): UpperCAmelCase : Any = base_class.__module__ UpperCAmelCase : Dict = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'''Could not infer framework from class {model_class}.''' )
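The nested-dictionary flattener defined above (its name is obfuscated to a__ in this corpus, wrapping an inner _flatten_dict) joins key paths with a delimiter. A standalone sketch of the same behavior, reimplemented inline so it runs without the module:

def flatten_dict(d: dict, parent_key: str = "", delimiter: str = ".") -> dict:
    # Walk the nested mapping and emit "outer.inner"-style keys.
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else str(k)
        if isinstance(v, dict) and v:   # non-empty sub-dicts recurse
            items.update(flatten_dict(v, key, delimiter))
        else:
            items[key] = v
    return items

print(flatten_dict({"model": {"encoder": {"lr": 1e-4}}, "seed": 3}))
# -> {'model.encoder.lr': 0.0001, 'seed': 3}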
336
0
'''simple docstring''' import math import random from typing import Any from .hill_climbing import SearchProblem def __lowerCamelCase ( A__ , A__ = True , A__ = math.inf , A__ = -math.inf , A__ = math.inf , A__ = -math.inf , A__ = False , A__ = 100 , A__ = 0.01 , A__ = 1 , ) -> Any: """simple docstring""" UpperCamelCase = False UpperCamelCase = search_prob UpperCamelCase = start_temperate UpperCamelCase = [] UpperCamelCase = 0 UpperCamelCase = None while not search_end: UpperCamelCase = current_state.score() if best_state is None or current_score > best_state.score(): UpperCamelCase = current_state scores.append(A__ ) iterations += 1 UpperCamelCase = None UpperCamelCase = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to UpperCamelCase = random.randint(0 , len(A__ ) - 1 ) # picking a random neighbor UpperCamelCase = neighbors.pop(A__ ) UpperCamelCase = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: UpperCamelCase = change * -1 # in case we are finding minimum if change > 0: # improves the solution UpperCamelCase = picked_neighbor else: UpperCamelCase = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability UpperCamelCase = picked_neighbor UpperCamelCase = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor UpperCamelCase = True else: UpperCamelCase = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(A__ ) , A__ ) plt.xlabel('Iterations' ) plt.ylabel('Function values' ) plt.show() return best_state if __name__ == "__main__": def __lowerCamelCase ( A__ , A__ ) -> Union[str, Any]: """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) _lowerCamelCase : Tuple = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _lowerCamelCase : Optional[Any] = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 " f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}''' ) # starting the problem with initial coordinates (12, 47) _lowerCamelCase : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _lowerCamelCase : List[Any] = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 " f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}''' ) def __lowerCamelCase ( A__ , A__ ) -> int: """simple docstring""" return (3 * x**2) - (6 * y) _lowerCamelCase : Optional[int] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _lowerCamelCase : Tuple = simulated_annealing(prob, find_max=False, visualization=True) print( "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: " f'''{local_min.score()}''' ) _lowerCamelCase : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _lowerCamelCase : str = simulated_annealing(prob, find_max=True, visualization=True) print( "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: " f'''{local_min.score()}''' )
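The else-branch in the loop above is the standard Metropolis acceptance rule: a non-improving move with score change d <= 0 is still taken with probability e^(d/T), so early (hot) iterations roam freely while late (cold) iterations behave like plain hill climbing. A tiny numeric illustration of the same expression:

import math

def acceptance_probability(change: float, temp: float) -> float:
    # Mirrors `(math.e) ** (change / current_temp)` from the loop above.
    return math.e ** (change / temp)

for temp in (100.0, 10.0, 1.0, 0.1):
    print(f"T={temp:>5}: P(accept change=-1) = {acceptance_probability(-1.0, temp):.4f}")
# T=100.0 -> 0.9900, T=10.0 -> 0.9048, T=1.0 -> 0.3679, T=0.1 -> 0.0000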
28
import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = LayoutLMTokenizer UpperCamelCase = LayoutLMTokenizerFast UpperCamelCase = True UpperCamelCase = True def __magic_name__ ( self : Any ): super().setUp() UpperCAmelCase : Dict = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __magic_name__ ( self : Union[str, Any], **__A : List[str] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A ) def __magic_name__ ( self : Optional[int], __A : int ): UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running''' UpperCAmelCase : Optional[int] = '''unwanted, running''' return input_text, output_text def __magic_name__ ( self : Any ): UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] ) def __magic_name__ ( self : Optional[int] ): pass
336
0
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Dict = (DDIMParallelScheduler,) _snake_case : List[Any] = (('''eta''', 0.0), ('''num_inference_steps''', 5_0)) def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Union[str, Any]: UpperCAmelCase_ : Dict = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**_UpperCamelCase ) return config def __UpperCAmelCase ( self , **_UpperCamelCase ) -> int: UpperCAmelCase_ : int = self.scheduler_classes[0] UpperCAmelCase_ : Dict = self.get_scheduler_config(**_UpperCamelCase ) UpperCAmelCase_ : Dict = scheduler_class(**_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = 1_0, 0.0 UpperCAmelCase_ : List[str] = self.dummy_model() UpperCAmelCase_ : Optional[Any] = self.dummy_sample_deter scheduler.set_timesteps(_UpperCamelCase ) for t in scheduler.timesteps: UpperCAmelCase_ : str = model(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample return sample def __UpperCAmelCase ( self ) -> List[str]: for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> str: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCamelCase ) UpperCAmelCase_ : Any = self.scheduler_classes[0] UpperCAmelCase_ : Dict = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : str = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def __UpperCAmelCase ( self ) -> Optional[Any]: for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> List[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Any: for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> str: self.check_over_configs(thresholding=_UpperCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , ) def __UpperCAmelCase ( self ) -> int: for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Tuple: for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=_UpperCamelCase , num_inference_steps=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_UpperCamelCase , 
eta=_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.scheduler_classes[0] UpperCAmelCase_ : List[str] = self.get_scheduler_config() UpperCAmelCase_ : Any = scheduler_class(**_UpperCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_47_71 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_24_60 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1E-5 def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[Any] = self.scheduler_classes[0] UpperCAmelCase_ : Any = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = 1_0, 0.0 scheduler.set_timesteps(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = self.dummy_model() UpperCAmelCase_ : str = self.dummy_sample_deter UpperCAmelCase_ : str = self.dummy_sample_deter + 0.1 UpperCAmelCase_ : List[Any] = self.dummy_sample_deter - 0.1 UpperCAmelCase_ : List[str] = samplea.shape[0] UpperCAmelCase_ : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCAmelCase_ : str = torch.arange(_UpperCamelCase )[0:3, None].repeat(1 , _UpperCamelCase ) UpperCAmelCase_ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCAmelCase_ : Dict = scheduler.batch_step_no_noise(_UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : List[Any] = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2 assert abs(result_mean.item() - 0.49_82 ) < 1E-3 def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Optional[int] = self.full_loop() UpperCAmelCase_ : List[str] = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : Any = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3 def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.full_loop(prediction_type='v_prediction' ) UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : List[str] = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 52.53_02 ) < 1E-2 assert abs(result_mean.item() - 0.06_84 ) < 1E-3 def __UpperCAmelCase ( self ) -> Tuple: # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : List[str] = self.full_loop(set_alpha_to_one=_UpperCamelCase , beta_start=0.01 ) UpperCAmelCase_ : int = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2 assert abs(result_mean.item() - 0.19_51 ) < 1E-3 def __UpperCAmelCase ( self ) -> Union[str, Any]: # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Dict = self.full_loop(set_alpha_to_one=_UpperCamelCase , beta_start=0.01 ) UpperCAmelCase_ : Optional[Any] = torch.sum(torch.abs(_UpperCamelCase ) ) UpperCAmelCase_ : Optional[Any] = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2 assert abs(result_mean.item() - 0.19_41 
) < 1E-3
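The _get_variance values asserted above correspond to the DDIM posterior variance sigma_t^2 = (1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev). Below is a hedged reimplementation of that formula under the test's config (linear betas from 1e-4 to 0.02 over 1000 steps); the commented values come from the asserts in the test, not from running this sketch:

import torch

betas = torch.linspace(0.0001, 0.02, 1000)           # "linear" schedule from the config
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def ddim_variance(t: int, prev_t: int) -> torch.Tensor:
    abar_t = alphas_cumprod[t]
    abar_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return (1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev)

print(ddim_variance(420, 400))   # expected ~0.14771 per the test
print(ddim_variance(999, 998))   # expected ~0.02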
29
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Dict = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : Dict = patch_size UpperCAmelCase : int = num_channels UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : Union[str, Any] = use_labels UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Tuple = mask_ratio UpperCAmelCase : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCAmelCase : Tuple = (image_size // patch_size) ** 2 UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : str = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Optional[Any] ): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ): UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A ) UpperCAmelCase : Tuple = model(__A, training=__A ) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ): UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A ) UpperCAmelCase : int = model(__A, training=__A ) # expected sequence length = num_patches UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2 UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCAmelCase : Tuple = 1 UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A ) UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = model(__A, training=__A ) UpperCAmelCase : Union[str, Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Dict = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = TFViTMAEModelTester(self ) UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[str] = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) ) UpperCAmelCase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) ) def __magic_name__ ( self : str ): UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(__A ) UpperCAmelCase : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : int = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__A ) def __magic_name__ ( self : int ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) 
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : str = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : Dict = model(__A, noise=__A ) UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) ) UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A ) UpperCAmelCase : Dict = outputs_dict[0].numpy() UpperCAmelCase : Tuple = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 ) def __magic_name__ ( self : Optional[Any] ): # make the mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__A : Union[str, Any] ): UpperCAmelCase : str = {} for k, v in inputs_dict.items(): if tf.is_tensor(__A ): UpperCAmelCase : Tuple = v.numpy() else: UpperCAmelCase : str = np.array(__A ) return inputs_np_dict for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : Any = self._prepare_for_class(__A, __A ) UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A ) UpperCAmelCase : str = model(__A, noise=__A ) UpperCAmelCase : str = model(**__A, noise=__A ) self.assert_outputs_same(__A, __A ) def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ): # make masks reproducible np.random.seed(2 ) UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : int = tf.constant(__A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCAmelCase : List[Any] = tf_noise super().check_pt_tf_models(__A, __A, __A ) def __magic_name__ ( self : str ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Union[str, Any] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__A ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(__A, __A ),) if isinstance(__A, __A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__A, '''_keras_serializable''', __A ) } UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : str = tf.convert_to_tensor(__A ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: UpperCAmelCase : Tuple = main_layer_class(__A ) UpperCAmelCase : int = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) ) UpperCAmelCase : List[Any] = model(__A ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' ) model.save(__A ) UpperCAmelCase : List[str] = tf.keras.models.load_model( __A, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__A, tf.keras.Model ) UpperCAmelCase : Tuple = model(__A ) self.assert_outputs_same(__A, __A ) @slow def __magic_name__ ( self : Dict ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(__A ) UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A ) UpperCAmelCase : Union[str, Any] = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy() UpperCAmelCase : Union[str, Any] = 0 else: UpperCAmelCase : Optional[int] = outputs.logits.numpy() UpperCAmelCase : int = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A, saved_model=__A ) UpperCAmelCase : Dict = model_class.from_pretrained(__A ) UpperCAmelCase : str = model(__A, noise=__A ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy() UpperCAmelCase : Dict = 0 else: UpperCAmelCase : Any = after_outputs['''logits'''].numpy() UpperCAmelCase : Dict = 0 UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__A, 1E-5 ) def __magic_name__ ( self : Optional[Any] ): # make mask reproducible np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) UpperCAmelCase : int = self._prepare_for_class(__A, __A ) UpperCAmelCase : List[Any] = model(__A, noise=__A ) UpperCAmelCase : str = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(__A ) UpperCAmelCase : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCAmelCase : str = model_class.from_config(model.config ) UpperCAmelCase : List[str] = new_model(__A ) # Build model 
new_model.set_weights(model.get_weights() ) UpperCAmelCase : Tuple = new_model(__A, noise=__A ) self.assert_outputs_same(__A, __A ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def __magic_name__ ( self : Tuple ): pass @slow def __magic_name__ ( self : str ): UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__A ) def a__ ( ) -> Dict: UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[str] ): return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def __magic_name__ ( self : str ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) UpperCAmelCase : List[str] = self.default_image_processor UpperCAmelCase : Any = prepare_img() UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCAmelCase : Optional[int] = ViTMAEConfig() UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCAmelCase : Optional[int] = model(**__A, noise=__A ) # verify the logits UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : List[str] = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
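The seq_length the ViTMAE tester derives in its constructor is the post-masking length: only ceil((1 - mask_ratio) * (num_patches + 1)) positions (patches plus the CLS token) survive random masking. Plugging in the tester's own defaults:

import math

image_size, patch_size, mask_ratio = 30, 2, 0.6      # defaults in the tester above
num_patches = (image_size // patch_size) ** 2        # 15 * 15 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226)
print(num_patches, seq_length)                       # 225 91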
336
0
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = 1_0 def _lowercase ( self : int ) -> List[str]: lowercase_ = [1, 2, 3, 4] lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[Any]: lowercase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = '''''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = torch.tensor([1, 2, 3, 4] ) lowercase_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() ) def _lowercase ( self : int ) -> Dict: lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def _lowercase ( self : List[str] ) -> Tuple: lowercase_ = 1_0_1 lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
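The tests above pin down the contracts of truncate_or_pad and build_mask. A plausible implementation consistent with those tests (a sketch; the real utils_summarization helpers may differ):

import torch


def truncate_or_pad(sequence, block_size, pad_token_id):
    # Clip to block_size, or right-pad with pad_token_id up to block_size.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


def build_mask(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the pad token id appears, matching all
    # three build_mask tests above (including pad id 1 amid real tokens).
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask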
30
def partition(m: int) -> int:
    # memo[m][m - 1] ends up holding p(m), the number of integer partitions of m.
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
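As a quick sanity check of the recurrence, assuming the definitions above: the seven partitions of 5 are 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1, so

# Known partition counts: p(4) = 5 and p(5) = 7.
assert partition(4) == 5
assert partition(5) == 7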
336
0
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
31
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # Check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # Mark as visited.
            solutions[i][j] = 1

            # Check the four directions: down, right, up, left.
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
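A small usage sketch, assuming the definitions above (0 marks an open cell and 1 a blocked one, per the block_flag check):

maze = [
    [0, 1, 0],
    [0, 1, 0],
    [0, 0, 0],
]
solve_maze(maze)
# Prints the path found by the down/right/up/left search order:
# [1, 0, 0]
# [1, 0, 0]
# [1, 1, 1]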
336
0
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase_ : Union[str, Any] = datasets.logging.get_logger(__name__) UpperCAmelCase_ : Dict = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' UpperCAmelCase_ : str = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' UpperCAmelCase_ : int = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : List[str] , __A : List[Any]=False , __A : Tuple=False , __A : Union[str, Any]=True , __A : List[str]=False , __A : Optional[int]="dummy_doc" ) -> Any: """simple docstring""" a_ : Any = {doc: key_lines} a_ : List[Any] = {doc: sys_lines} a_ : Union[str, Any] = {} a_ : int = 0 a_ : List[Any] = 0 a_ : Union[str, Any] = 0 a_ : Union[str, Any] = 0 a_ : int = 0 a_ : Optional[int] = 0 a_ , a_ : Optional[int] = reader.get_doc_mentions(__A , key_doc_lines[doc] , __A ) key_singletons_num += singletons_num if NP_only or min_span: a_ : List[Any] = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A ) a_ , a_ : List[str] = reader.get_doc_mentions(__A , sys_doc_lines[doc] , __A ) sys_singletons_num += singletons_num if NP_only or min_span: a_ : int = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A ) if remove_nested: a_ , a_ : Union[str, Any] = reader.remove_nested_coref_mentions(__A , __A ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters a_ , a_ : Union[str, Any] = reader.remove_nested_coref_mentions(__A , __A ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters a_ : Any = reader.get_mention_assignments(__A , __A ) a_ : List[str] = reader.get_mention_assignments(__A , __A ) a_ : Dict = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( 'Number of resulting singleton clusters in the key ' F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ 'files, respectively' ) return doc_coref_infos def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : str , __A : str , __A : List[str] , __A : Any , __A : Optional[int] , __A : Dict ) -> Optional[int]: """simple docstring""" a_ : Tuple = get_coref_infos(__A , __A , 
__A , __A , __A , __A ) a_ : Optional[int] = {} a_ : Union[str, Any] = 0 a_ : str = 0 for name, metric in metrics: a_ , a_ , a_ : Any = evaluator.evaluate_documents(__A , __A , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , F"""Recall: {recall * 1_00:.2f}""" , F""" Precision: {precision * 1_00:.2f}""" , F""" F1: {fa * 1_00:.2f}""" , ) if conll_subparts_num == 3: a_ : List[Any] = (conll / 3) * 1_00 logger.info(F"""CoNLL score: {conll:.2f}""" ) output_scores.update({'conll_score': conll} ) return output_scores def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Union[str, Any]: """simple docstring""" a_ : Union[str, Any] = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: a_ : str = line.split()[5] if not parse_col == "-": a_ : Optional[int] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Sequence(datasets.Value('string' ) ), } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ] , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> List[str]: a_ : str = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: a_ : Tuple = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE__ ) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" a_ : List[str] = evaluate( key_lines=SCREAMING_SNAKE_CASE__ , sys_lines=SCREAMING_SNAKE_CASE__ , metrics=SCREAMING_SNAKE_CASE__ , NP_only=SCREAMING_SNAKE_CASE__ , remove_nested=SCREAMING_SNAKE_CASE__ , keep_singletons=SCREAMING_SNAKE_CASE__ , min_span=SCREAMING_SNAKE_CASE__ , ) return score
32
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __UpperCAmelCase : def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ): UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Any = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : List[str] = patch_size UpperCAmelCase : Dict = num_channels UpperCAmelCase : List[Any] = is_training UpperCAmelCase : Dict = use_labels UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : Union[str, Any] = num_hidden_layers UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Any = hidden_act UpperCAmelCase : Any = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : str = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : int = scope UpperCAmelCase : List[str] = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size UpperCAmelCase : str = (self.image_size // 3_2) ** 2 UpperCAmelCase : List[str] = num_patches + 1 def __magic_name__ ( self : List[str] ): UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : str = None if self.use_labels: UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self : Any ): UpperCAmelCase : Dict = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 1_6, 3_2], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, 
backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, ) def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ): UpperCAmelCase : int = ViTHybridModel(config=__A ) model.to(__A ) model.eval() UpperCAmelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ): UpperCAmelCase : str = self.type_sequence_label_size UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A ) model.to(__A ) model.eval() UpperCAmelCase : Dict = model(__A, labels=__A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self : int ): UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs UpperCAmelCase : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () UpperCamelCase = ( {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Any = ViTHybridModelTester(self ) UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 ) def __magic_name__ ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ): pass def __magic_name__ ( self : int ): UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) UpperCAmelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A, nn.Linear ) ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = model_class(__A ) UpperCAmelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : str = [*signature.parameters.keys()] UpperCAmelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1], __A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __magic_name__ ( self : List[str] ): UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Dict = _config_zero_init(__A ) for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(config=__A ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == 
"ViTHybridPatchEmbeddings": UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @slow def __magic_name__ ( self : List[str] ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def a__ ( ) -> Tuple: UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__ ( self : str ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __A ) UpperCAmelCase : Tuple = self.default_image_processor UpperCAmelCase : int = prepare_img() UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(**__A ) # verify the logits UpperCAmelCase : str = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, __A ) UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) ) @slow @require_accelerate def __magic_name__ ( self : Dict ): UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' ) UpperCAmelCase : Tuple = prepare_img() UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' ) UpperCAmelCase : Dict = model(**__A ) UpperCAmelCase : Any = outputs.logits # model predicts one of the 1000 ImageNet classes UpperCAmelCase : Dict = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
336
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __A : List[str] = logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : Any = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : str , **A : List[Any] ) -> Union[str, Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : List[str] = deprecated_arg[3:] setattr(self , A , not kwargs.pop(A ) ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : Optional[Any] = kwargs.pop('''torchscript''' , self.torchscript ) lowercase_ : Tuple = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics ) lowercase_ : Any = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level ) super().__init__(**A ) SCREAMING_SNAKE_CASE_ : bool = field(default=_A , metadata={"help": "Trace the models using torchscript"} ) SCREAMING_SNAKE_CASE_ : bool = field(default=_A , metadata={"help": "Print Xla/PyTorch tpu metrics"} ) SCREAMING_SNAKE_CASE_ : str = field( default="O1" , metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ) } , ) @cached_property def A ( self : Optional[Any] ) -> Tuple["torch.device", int]: requires_backends(self , ['''torch'''] ) logger.info('''PyTorch: setting up devices''' ) if not self.cuda: lowercase_ : List[Any] = torch.device('''cpu''' ) lowercase_ : Optional[int] = 0 elif is_torch_tpu_available(): lowercase_ : Optional[Any] = xm.xla_device() lowercase_ : Union[str, Any] = 0 else: lowercase_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) lowercase_ : str = torch.cuda.device_count() return device, n_gpu @property def A ( self : Any ) -> str: return is_torch_tpu_available() and self.tpu @property def A ( self : str ) -> int: requires_backends(self , ['''torch'''] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def A ( self : int ) -> "torch.device": requires_backends(self , ['''torch'''] ) return self._setup_devices[0] @property def A ( self : List[str] ) -> str: requires_backends(self , ['''torch'''] ) return self._setup_devices[1] @property def A ( self : List[str] ) -> Optional[Any]: return self.n_gpu > 0
33
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Naive O(n^3): try every 3-permutation until one sums to the target.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # O(n^2): sort, then sweep two pointers for each fixed first element.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
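A quick agreement check on a fixed input, assuming the definitions above; both solvers should return the same sorted triplet:

assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)
# Note: triplet_sum2 sorts its argument in place, so pass a copy if the
# original ordering matters.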
336
0
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def snake_case_ (_a : Optional[int] , _a : List[Any] , _a : int ): UpperCAmelCase = AutoConfig.from_pretrained(_a ) UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=_a ) UpperCAmelCase = checkpoints.load_tax_checkpoint(_a ) UpperCAmelCase = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": UpperCAmelCase = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": UpperCAmelCase = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): UpperCAmelCase = F"layers_{str(_a )}" # Self-Attention UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning UpperCAmelCase = flax_model.params['''encoder''']['''block'''][str(_a )]['''layer'''] UpperCAmelCase = tax_attention_key UpperCAmelCase = tax_attention_out UpperCAmelCase = tax_attention_query UpperCAmelCase = tax_attention_value UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: UpperCAmelCase = tax_mlp_wi_a UpperCAmelCase = tax_mlp_wi_a else: UpperCAmelCase = tax_mlp_wi UpperCAmelCase = tax_mlp_wo UpperCAmelCase = tax_mlp_layer_norm UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: UpperCAmelCase = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase = 
tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning UpperCAmelCase = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): UpperCAmelCase = F"layers_{str(_a )}" # Self-Attention UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] UpperCAmelCase = tax_enc_dec_attention_module['''key''']['''kernel'''] UpperCAmelCase = tax_enc_dec_attention_module['''out''']['''kernel'''] UpperCAmelCase = tax_enc_dec_attention_module['''query''']['''kernel'''] UpperCAmelCase = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning UpperCAmelCase = flax_model.params['''decoder''']['''block'''][str(_a )]['''layer'''] UpperCAmelCase = tax_attention_key UpperCAmelCase = tax_attention_out UpperCAmelCase = tax_attention_query UpperCAmelCase = tax_attention_value UpperCAmelCase = tax_pre_attention_layer_norm UpperCAmelCase = tax_enc_dec_attention_key UpperCAmelCase = tax_enc_dec_attention_out UpperCAmelCase = tax_enc_dec_attention_query UpperCAmelCase = tax_enc_dec_attention_value UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: UpperCAmelCase = tax_mlp_wi_a UpperCAmelCase = tax_mlp_wi_a else: UpperCAmelCase = tax_mlp_wi UpperCAmelCase = tax_mlp_wo UpperCAmelCase = txa_mlp_layer_norm UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization UpperCAmelCase = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] UpperCAmelCase = txa_decoder_norm # Only for layer 0: UpperCAmelCase = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings UpperCAmelCase = tax_model['''target''']['''token_embedder''']['''embedding'''] UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: UpperCAmelCase = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(_a ) print('''T5X Model was sucessfully 
converted!''' ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) A =parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
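A sketch of driving the conversion script above; the script filename and all paths are placeholders, and the config name is assumed to be a hub LongT5 checkpoint id (the flag names come from the argparse setup):

import subprocess

subprocess.run(
    [
        "python", "convert_t5x_checkpoint_to_flax.py",  # assumed filename
        "--t5x_checkpoint_path", "/path/to/t5x/checkpoint",
        "--config_name", "google/long-t5-local-base",
        "--flax_dump_folder_path", "/path/to/flax_dump",
    ],
    check=True,
)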
34
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class __UpperCAmelCase : def __magic_name__ ( self : int, __A : Dict ): raise NotImplementedError() def __magic_name__ ( self : int ): raise NotImplementedError() class __UpperCAmelCase ( lowerCamelCase__ ): def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ): UpperCAmelCase : List[str] = tokenizer UpperCAmelCase : str = skip_prompt UpperCAmelCase : List[str] = decode_kwargs # variables used in the streaming process UpperCAmelCase : Dict = [] UpperCAmelCase : List[str] = 0 UpperCAmelCase : Union[str, Any] = True def __magic_name__ ( self : Dict, __A : Optional[int] ): if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: UpperCAmelCase : Union[str, Any] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: UpperCAmelCase : Optional[int] = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): UpperCAmelCase : Union[str, Any] = text[self.print_len :] UpperCAmelCase : int = [] UpperCAmelCase : int = 0 # If the last token is a CJK character, we print the characters. elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ): UpperCAmelCase : Union[str, Any] = text[self.print_len :] self.print_len += len(__A ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(__A ) self.on_finalized_text(__A ) def __magic_name__ ( self : str ): # Flush the cache, if it exists if len(self.token_cache ) > 0: UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs ) UpperCAmelCase : Dict = text[self.print_len :] UpperCAmelCase : List[Any] = [] UpperCAmelCase : List[Any] = 0 else: UpperCAmelCase : Dict = '''''' UpperCAmelCase : str = True self.on_finalized_text(__A, stream_end=__A ) def __magic_name__ ( self : List[str], __A : str, __A : bool = False ): print(__A, flush=__A, end='''''' if not stream_end else None ) def __magic_name__ ( self : List[Any], __A : Optional[int] ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
        if (
            (cp >= 0X4E00 and cp <= 0X9FFF)
            or (cp >= 0X3400 and cp <= 0X4DBF)  #
            or (cp >= 0X20000 and cp <= 0X2A6DF)  #
            or (cp >= 0X2A700 and cp <= 0X2B73F)  #
            or (cp >= 0X2B740 and cp <= 0X2B81F)  #
            or (cp >= 0X2B820 and cp <= 0X2CEAF)  #
            or (cp >= 0XF900 and cp <= 0XFAFF)
            or (cp >= 0X2F800 and cp <= 0X2FA1F)  #
        ):  #
            return True

        return False


class __UpperCAmelCase ( lowerCamelCase__ ):
    def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
        super().__init__(__A, __A, **__A )
        UpperCAmelCase : Dict = Queue()
        UpperCAmelCase : Any = None
        UpperCAmelCase : Any = timeout

    def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
        self.text_queue.put(__A, timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout )

    def __iter__( self : int ):
        return self

    def __magic_name__ ( self : Optional[int] ):
        UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
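These classes mirror transformers' TextStreamer and TextIteratorStreamer. A usage sketch with the upstream names (model id and prompt are illustrative):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, TextStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")

# Blocking variant: tokens are printed to stdout as they are generated.
model.generate(**inputs, streamer=TextStreamer(tok), max_new_tokens=20)

# Non-blocking variant: generation runs on a worker thread while the main
# thread consumes decoded text chunks by iterating over the streamer.
streamer = TextIteratorStreamer(tok, skip_prompt=True, timeout=10.0)
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
thread.start()
for new_text in streamer:
    print(new_text, end="")
thread.join()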
336
0
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def __snake_case( _lowerCAmelCase ) -> Union[str, Any]: if isinstance(_lowerCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class UpperCAmelCase_ : """simple docstring""" def lowerCamelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] ): pass def lowerCamelCase ( self : Optional[int] ): pass def lowerCamelCase ( self : Optional[Any] ): pass def lowerCamelCase ( self : Dict , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[Any]=None , **snake_case_ : List[Any] ): snake_case__ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ ) snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel(snake_case_ ) snake_case__ : Tuple = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : List[Any]=None , **snake_case_ : Union[str, Any] ): snake_case__ , snake_case__ : List[str] = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ ) snake_case__ : int = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : str=None , **snake_case_ : Union[str, Any] ): snake_case__ , snake_case__ : Dict = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} snake_case__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ ) snake_case__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], 
model.config.projection_dim) ) def lowerCamelCase ( self : Any , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : int=None , **snake_case_ : str ): snake_case__ , snake_case__ : Union[str, Any] = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Any = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ ) snake_case__ : int = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) snake_case__ : int = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case_ ) snake_case__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ ) snake_case__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) snake_case__ : Tuple = after_output[0].numpy() snake_case__ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(snake_case_ , 1E-5 ) def lowerCamelCase ( self : str , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[str]=None , **snake_case_ : List[str] ): snake_case__ , snake_case__ : Optional[int] = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ ) snake_case__ : int = model( input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ ) snake_case__ : List[Any] = output.vision_model_output.attentions self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case__ : Optional[Any] = to_atuple(vision_model.config.image_size ) snake_case__ : str = to_atuple(vision_model.config.patch_size ) snake_case__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) snake_case__ : Union[str, Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) snake_case__ : Any = output.text_model_output.attentions self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCamelCase ( self : str , snake_case_ : np.ndarray , snake_case_ : np.ndarray , snake_case_ : float ): snake_case__ : List[Any] = np.abs((a - b) ).max() self.assertLessEqual(snake_case_ , snake_case_ , f"Difference between torch and flax is {diff} (>= {tol})." 
) def lowerCamelCase ( self : Any ): snake_case__ : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**snake_case_ ) def lowerCamelCase ( self : str ): snake_case__ : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**snake_case_ ) def lowerCamelCase ( self : List[Any] ): snake_case__ : Optional[int] = self.prepare_config_and_inputs() self.check_save_load(**snake_case_ ) def lowerCamelCase ( self : int ): snake_case__ : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**snake_case_ ) @slow def lowerCamelCase ( self : str ): snake_case__ , snake_case__ : Any = self.get_pretrained_model_and_inputs() snake_case__ : Union[str, Any] = model_a(**snake_case_ ) snake_case__ : Union[str, Any] = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(snake_case_ ) snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ ) snake_case__ : int = model_a(**snake_case_ ) snake_case__ : Dict = after_outputs[0].numpy() snake_case__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(snake_case_ , 1E-5 ) @require_tf class UpperCAmelCase_ ( _a , unittest.TestCase ): """simple docstring""" def lowerCamelCase ( self : Optional[int] ): snake_case__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) snake_case__ : Optional[int] = 13 snake_case__ : Tuple = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) snake_case__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) snake_case__ : Any = random_attention_mask([batch_size, 4] ) snake_case__ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] ): snake_case__ : Union[str, Any] = TFViTModel(snake_case_ , name="""vision_model""" ) snake_case__ : Any = TFBertModel(snake_case_ , name="""text_model""" ) return vision_model, text_model def lowerCamelCase ( self : str ): snake_case__ : Union[str, Any] = TFViTModelTester(self ) snake_case__ : str = TFBertModelTester(self ) snake_case__ : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() snake_case__ : Any = bert_model_tester.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : Optional[Any] = vision_config_and_inputs ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class UpperCAmelCase_ ( _a , unittest.TestCase ): """simple docstring""" def lowerCamelCase ( self : Dict ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
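
# A minimal usage sketch of the dual-encoder API exercised above; it reuses the tiny test
# checkpoints already named in this file, and everything else is standard transformers usage
# rather than part of the original test suite.
def _sketch_dual_encoder_similarity():
    import numpy as np
    from transformers import TFVisionTextDualEncoderModel

    model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
        "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
    )
    cfg = model.vision_model.config
    pixel_values = np.random.randn(2, cfg.num_channels, cfg.image_size, cfg.image_size).astype("float32")
    input_ids = np.ones((2, 4), dtype="int32")
    outputs = model(input_ids=input_ids, pixel_values=pixel_values)
    # image-text similarity logits, shape (num_images, num_texts)
    print(outputs.logits_per_image.shape)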
35
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = p0 + p1*x1 + p2*x2 + p3*x3 for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
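
# After run_gradient_descent() converges, a prediction for a new input can be read straight off
# parameter_vector; this helper just restates the hypothesis above, and the sample call is
# illustrative only.
def _sketch_predict(features):
    value = parameter_vector[0]
    for i, x in enumerate(features):
        value += parameter_vector[i + 1] * x
    return value

# e.g. _sketch_predict((5, 2, 3)) should move toward 15, the first training pair's output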
336
0
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # ByT5 only accepts one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=False, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
36
def interpolation_search(sorted_collection, item):
    """Search for `item` in an ascending sorted collection by interpolation search."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search over sorted_collection[left:right + 1]."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
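
# Worked example of the probe position both functions use,
#   point = left + (item - a[left]) * (right - left) // (a[right] - a[left]),
# on the __main__ data a = [10, 30, 40, 45, 50, 66, 77, 93]:
#   searching 66: first probe is (66 - 10) * 7 // 83 = 4, a[4] = 50 < 66, so left moves to 5;
#   the second probe lands on index 5 and a[5] == 66 is returned.
#   searching 67 narrows the range the same way but exhausts it and returns None.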
336
0
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
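
# A hedged denoising-loop sketch using only the methods defined above (set_timesteps,
# init_noise_sigma, scale_model_input, step); `unet` stands in for any epsilon-predicting
# model and is hypothetical.
def _sketch_denoise(unet, sample, num_inference_steps=25):
    scheduler = KDPM2DiscreteScheduler()
    scheduler.set_timesteps(num_inference_steps, device=sample.device)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = unet(model_input, t)  # hypothetical model call
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample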
37
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
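
# Invocation sketch for the entry point above; the script filename is illustrative, while the
# flags are exactly the ones declared in the argparse section:
#
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base \
#       --push_to_hub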
336
0
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(
    variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False
):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
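
# Invocation sketch; the script filename and paths are illustrative, the flags are the ones
# declared above:
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention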
38
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
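
# Minimal usage sketch mirroring test_model_from_pretrained above; it assumes only the public
# from_pretrained API and the "roberta-base" checkpoint already referenced in the test.
def _sketch_flax_roberta_forward():
    model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
    outputs = model(np.ones((1, 8), dtype="i4"))
    print(outputs.last_hidden_state.shape)  # (1, 8, hidden_size)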
336
0
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# _a = [ # (stable-diffusion, HF Diffusers) ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''), ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''), ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''), ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''), ('''input_blocks.0.0.weight''', '''conv_in.weight'''), ('''input_blocks.0.0.bias''', '''conv_in.bias'''), ('''out.0.weight''', '''conv_norm_out.weight'''), ('''out.0.bias''', '''conv_norm_out.bias'''), ('''out.2.weight''', '''conv_out.weight'''), ('''out.2.bias''', '''conv_out.bias'''), ] _a = [ # (stable-diffusion, HF Diffusers) ('''in_layers.0''', '''norm1'''), ('''in_layers.2''', '''conv1'''), ('''out_layers.0''', '''norm2'''), ('''out_layers.3''', '''conv2'''), ('''emb_layers.1''', '''time_emb_proj'''), ('''skip_connection''', '''conv_shortcut'''), ] _a = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks _a = F'''down_blocks.{i}.resnets.{j}.''' _a = F'''input_blocks.{3*i + j + 1}.0.''' unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 _a = F'''down_blocks.{i}.attentions.{j}.''' _a = F'''input_blocks.{3*i + j + 1}.1.''' unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks _a = F'''up_blocks.{i}.resnets.{j}.''' _a = F'''output_blocks.{3*i + j}.0.''' unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 _a = F'''up_blocks.{i}.attentions.{j}.''' _a = F'''output_blocks.{3*i + j}.1.''' unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 _a = F'''down_blocks.{i}.downsamplers.0.conv.''' _a = F'''input_blocks.{3*(i+1)}.0.op.''' unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 _a = F'''up_blocks.{i}.upsamplers.0.''' _a = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.''' unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) _a = '''mid_block.attentions.0.''' _a = '''middle_block.1.''' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): _a = F'''mid_block.resnets.{j}.''' _a = F'''middle_block.{2*j}.''' unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def __A ( __lowerCAmelCase )-> Optional[Any]: """simple docstring""" _UpperCAmelCase = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: _UpperCAmelCase = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: _UpperCAmelCase = v.replace(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: _UpperCAmelCase = v.replace(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = v _UpperCAmelCase = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# _a = [ # (stable-diffusion, HF Diffusers) ('''nin_shortcut''', '''conv_shortcut'''), 
    ('''norm_out''', '''conv_norm_out'''),
    ('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        _a = F'''encoder.down_blocks.{i}.resnets.{j}.'''
        _a = F'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        _a = F'''down_blocks.{i}.downsamplers.0.'''
        _a = F'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        _a = F'''up_blocks.{i}.upsamplers.0.'''
        _a = F'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        _a = F'''decoder.up_blocks.{i}.resnets.{j}.'''
        _a = F'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    _a = F'''mid_block.resnets.{i}.'''
    _a = F'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

_a = [
    # (stable-diffusion, HF Diffusers)
    ('''norm.''', '''group_norm.'''),
    ('''q.''', '''query.'''),
    ('''k.''', '''key.'''),
    ('''v.''', '''value.'''),
    ('''proj_out.''', '''proj_attn.'''),
]


# attention weights stored as linear layers in Diffusers get a trailing
# 1x1 conv shape in the original SD checkpoint layout
def __A ( __lowerCAmelCase )-> Optional[int]:
    """simple docstring"""
    return __lowerCAmelCase.reshape(*__lowerCAmelCase.shape , 1 , 1 )


def __A ( __lowerCAmelCase )-> Optional[int]:
    """simple docstring"""
    _UpperCAmelCase = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            _UpperCAmelCase = v.replace(__lowerCAmelCase , __lowerCAmelCase )
        _UpperCAmelCase = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                _UpperCAmelCase = v.replace(__lowerCAmelCase , __lowerCAmelCase )
            _UpperCAmelCase = v
    _UpperCAmelCase = {v: vae_state_dict[k] for k, v in mapping.items()}
    _UpperCAmelCase = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F"""mid.attn_1.{weight_name}.weight""" in k:
                print(F"""Reshaping {k} for SD format""" )
                _UpperCAmelCase = reshape_weight_for_sd(__lowerCAmelCase )
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

_a = [
    # (stable-diffusion, HF Diffusers)
    ('''resblocks.''', '''text_model.encoder.layers.'''),
    ('''ln_1''', '''layer_norm1'''),
    ('''ln_2''', '''layer_norm2'''),
    ('''.c_fc.''', '''.fc1.'''),
    ('''.c_proj.''', '''.fc2.'''),
    ('''.attn''', '''.self_attn'''),
    ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
    ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
    ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
_a = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
_a = re.compile('''|'''.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_a = {'''q''': 0, '''k''': 1, '''v''': 2}


def __A ( __lowerCAmelCase )-> Union[str, Any]:
    """simple docstring"""
    _UpperCAmelCase = {}
    _UpperCAmelCase = {}
    _UpperCAmelCase = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight' )
            or k.endswith('.self_attn.k_proj.weight' )
            or k.endswith('.self_attn.v_proj.weight' )
        ):
            _UpperCAmelCase = k[: -len('.q_proj.weight' )]
            _UpperCAmelCase = k[-len('q_proj.weight' )]
            if k_pre not in capture_qkv_weight:
                _UpperCAmelCase = [None, None, None]
            _UpperCAmelCase = v
            continue

        if (
            k.endswith('.self_attn.q_proj.bias' )
            or k.endswith('.self_attn.k_proj.bias' )
            or k.endswith('.self_attn.v_proj.bias' )
        ):
            _UpperCAmelCase = k[: -len('.q_proj.bias' )]
            _UpperCAmelCase = k[-len('q_proj.bias' )]
            if k_pre not in capture_qkv_bias:
                _UpperCAmelCase = [None, None, None]
            _UpperCAmelCase = v
            continue

        _UpperCAmelCase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , __lowerCAmelCase )
        _UpperCAmelCase = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        _UpperCAmelCase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        _UpperCAmelCase = torch.cat(tensors )

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        _UpperCAmelCase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        _UpperCAmelCase = torch.cat(tensors )

    return new_state_dict


def __A ( __lowerCAmelCase )-> Optional[Any]:
    """simple docstring"""
    return text_enc_dict


if __name__ == "__main__":
    _a = argparse.ArgumentParser()

    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
    )

    _a = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"

    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    _a = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    _a = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    _a = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        _a = load_file(unet_path, device='''cpu''')
    else:
        _a = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        _a = torch.load(unet_path, map_location='''cpu''')

    if osp.exists(vae_path):
        _a = load_file(vae_path, device='''cpu''')
    else:
        _a = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        _a = torch.load(vae_path, map_location='''cpu''')

    if osp.exists(text_enc_path):
        _a = load_file(text_enc_path, device='''cpu''')
    else:
        _a = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        _a = torch.load(text_enc_path, map_location='''cpu''')

    # Convert the UNet model
    _a = convert_unet_state_dict(unet_state_dict)
    _a = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    _a = convert_vae_state_dict(vae_state_dict)
    _a = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    _a = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        _a = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        _a = convert_text_enc_state_dict_vaa(text_enc_dict)
        _a = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        _a = convert_text_enc_state_dict(text_enc_dict)
        _a = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    _a = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        _a = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        _a = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
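# Illustrative sketch (added; not part of the original sample): the conversion
# above is driven by simple substring replacement over state-dict keys. A
# minimal, self-contained version of that idea, with hypothetical names:
def remap_keys_sketch(state_dict, conversion_map):
    """Rename state-dict keys by applying (new_part, old_part) replacements."""
    mapping = {k: k for k in state_dict}
    for k, v in mapping.items():
        for new_part, old_part in conversion_map:
            v = v.replace(old_part, new_part)
        mapping[k] = v
    # key the result by the renamed key, value is the original tensor
    return {v: state_dict[k] for k, v in mapping.items()}

# Example: remap_keys_sketch({'conv_norm_out.weight': 1},
#                            [('norm_out', 'conv_norm_out')])
# returns {'norm_out.weight': 1}.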
39
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)

_lowerCamelCase : Dict = {"vocab_file": "vocab.txt"}

_lowerCamelCase : List[str] = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

_lowerCamelCase : List[Any] = {
    "facebook/esm2_t6_8M_UR50D": 1_0_2_4,
    "facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}


def a__ ( UpperCAmelCase : List[str] ) -> Any:
    with open(UpperCAmelCase, '''r''' ) as f:
        UpperCAmelCase : Dict = f.read().splitlines()
        return [l.strip() for l in lines]


class __UpperCAmelCase ( lowerCamelCase__ ):
    UpperCamelCase = VOCAB_FILES_NAMES
    UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase = ["""input_ids""", """attention_mask"""]

    def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ):
        super().__init__(**__A )
        UpperCAmelCase : Tuple = load_vocab_file(__A )
        UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) )
        UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        UpperCAmelCase : Union[str, Any] = unk_token
        UpperCAmelCase : Optional[Any] = cls_token
        UpperCAmelCase : Optional[int] = pad_token
        UpperCAmelCase : Optional[int] = mask_token
        UpperCAmelCase : List[str] = eos_token
        UpperCAmelCase : Optional[Any] = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def __magic_name__ ( self : Tuple, __A : int ):
        return self._id_to_token.get(__A, self.unk_token )

    def __magic_name__ ( self : List[Any], __A : str ):
        return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )

    def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ):
        return text.split()

    def __magic_name__ ( self : Optional[int], __A : Dict=False ):
        return len(self._id_to_token )

    def __magic_name__ ( self : int ):
        return {token: i for i, token in enumerate(self.all_tokens )}

    def __magic_name__ ( self : Tuple, __A : str ):
        return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )

    def __magic_name__ ( self : Any, __A : int ):
        return self._id_to_token.get(__A, self.unk_token )

    def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ):
        UpperCAmelCase : Optional[int] = [self.cls_token_id]
        UpperCAmelCase : Optional[int] = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
        return cls + token_ids_a + sep + token_ids_a + sep  # Multiple inputs always have an EOS token

    def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ):
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(__A ) + [1]
        return mask

    def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ):
        UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(__A, '''w''' ) as f:
            f.write('''\n'''.join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def __magic_name__ ( self : Dict ):
        return self.get_vocab_size(with_added_tokens=__A )

    def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ):
        return super()._add_tokens(__A, special_tokens=__A )
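# Illustrative sketch (added; not from the original file): the ESM-style layout
# built above is <cls> seq <eos> for one sequence and <cls> seq_a <eos> seq_b <eos>
# for a pair. With hypothetical token ids cls=0 and eos=2:
def build_with_special_tokens_sketch(ids_a, ids_b=None, cls_id=0, eos_id=2):
    # single sequence: [cls] + tokens + [eos]
    if ids_b is None:
        return [cls_id] + ids_a + [eos_id]
    # pair: every sequence gets its own trailing eos
    return [cls_id] + ids_a + [eos_id] + ids_b + [eos_id]

# build_with_special_tokens_sketch([5, 6]) == [0, 5, 6, 2]
# build_with_special_tokens_sketch([5, 6], [7]) == [0, 5, 6, 2, 7, 2]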
336
0
"""simple docstring""" import re from filelock import FileLock try: import nltk __lowercase = True except (ImportError, ModuleNotFoundError): __lowercase = False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def lowercase ( A_ )-> str: '''simple docstring''' re.sub("<n>" , "" , A_ ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(A_ ) )
40
import inspect
import unittest

from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class __UpperCAmelCase ( lowerCamelCase__ ):
    def __magic_name__ ( self : Optional[Any] ):
        UpperCAmelCase : str = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
        self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )


class __UpperCAmelCase :
    def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
        UpperCAmelCase : Optional[int] = parent
        UpperCAmelCase : List[str] = batch_size
        UpperCAmelCase : List[str] = num_channels
        UpperCAmelCase : str = image_size
        UpperCAmelCase : Optional[int] = depth_multiplier
        UpperCAmelCase : Union[str, Any] = depth_divisible_by
        UpperCAmelCase : Optional[Any] = min_depth
        UpperCAmelCase : List[str] = expand_ratio
        UpperCAmelCase : Dict = tf_padding
        UpperCAmelCase : str = output_stride
        UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
        UpperCAmelCase : List[Any] = finegrained_output
        UpperCAmelCase : Optional[Any] = hidden_act
        UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        UpperCAmelCase : Optional[Any] = classifier_dropout_prob
        UpperCAmelCase : Dict = use_labels
        UpperCAmelCase : List[str] = is_training
        UpperCAmelCase : Tuple = num_labels
        UpperCAmelCase : Union[str, Any] = initializer_range
        UpperCAmelCase : Any = scope

    def __magic_name__ ( self : List[Any] ):
        UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        UpperCAmelCase : Dict = None
        UpperCAmelCase : Any = None
        if self.use_labels:
            UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
            UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )

        UpperCAmelCase : Optional[Any] = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def __magic_name__ ( self : Any ):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
        UpperCAmelCase : Any = MobileNetVaModel(config=__A )
        model.to(__A )
        model.eval()
        UpperCAmelCase : Optional[Any] = model(__A )
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
        UpperCAmelCase : Optional[int] = self.num_labels
        UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
        model.to(__A )
        model.eval()
        UpperCAmelCase : Optional[int] = model(__A, labels=__A )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
        UpperCAmelCase : Tuple = self.num_labels
        UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
        model.to(__A )
        model.eval()
        UpperCAmelCase : Dict = model(__A )
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def __magic_name__ ( self : Tuple ):
        UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
        UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    UpperCamelCase = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    UpperCamelCase = (
        {
            """feature-extraction""": MobileNetVaModel,
            """image-classification""": MobileNetVaForImageClassification,
            """image-segmentation""": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False

    def __magic_name__ ( self : Union[str, Any] ):
        UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
        UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )

    def __magic_name__ ( self : Tuple ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
    def __magic_name__ ( self : Optional[int] ):
        pass

    @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
    def __magic_name__ ( self : Tuple ):
        pass

    @unittest.skip(reason='''MobileNetV2 does not output attentions''' )
    def __magic_name__ ( self : Any ):
        pass

    def __magic_name__ ( self : Optional[int] ):
        UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase : Optional[Any] = model_class(__A )
            UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]

            UpperCAmelCase : Any = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], __A )

    def __magic_name__ ( self : List[Any] ):
        UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A )

    def __magic_name__ ( self : int ):
        def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
            UpperCAmelCase : Union[str, Any] = model_class(__A )
            model.to(__A )
            model.eval()

            with torch.no_grad():
                UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )

            UpperCAmelCase : Optional[Any] = outputs.hidden_states

            UpperCAmelCase : List[Any] = 1_6
            self.assertEqual(len(__A ), __A )

        UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase : Tuple = True
            check_hidden_states_output(__A, __A, __A )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase : Tuple = True

            check_hidden_states_output(__A, __A, __A )

    def __magic_name__ ( self : List[str] ):
        UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__A )

    def __magic_name__ ( self : int ):
        UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__A )

    @slow
    def __magic_name__ ( self : Dict ):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
            self.assertIsNotNone(__A )


def a__ ( ) -> int:
    UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
    @cached_property
    def __magic_name__ ( self : List[Any] ):
        return (
            MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' )
            if is_vision_available()
            else None
        )

    @slow
    def __magic_name__ ( self : Optional[Any] ):
        UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )

        UpperCAmelCase : Optional[int] = self.default_image_processor
        UpperCAmelCase : Optional[Any] = prepare_img()
        UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )

        # forward pass
        with torch.no_grad():
            UpperCAmelCase : str = model(**__A )

        # verify the logits
        UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
        self.assertEqual(outputs.logits.shape, __A )

        UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )

        self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )

    @slow
    def __magic_name__ ( self : Optional[int] ):
        UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
        UpperCAmelCase : List[Any] = model.to(__A )

        UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )

        UpperCAmelCase : List[Any] = prepare_img()
        UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )

        # forward pass
        with torch.no_grad():
            UpperCAmelCase : Union[str, Any] = model(**__A )
        UpperCAmelCase : Optional[Any] = outputs.logits

        # verify the logits
        UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
        self.assertEqual(logits.shape, __A )

        UpperCAmelCase : Tuple = torch.tensor(
            [
                [[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
                [[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
                [[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
            ],
            device=__A,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
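# Shape note (illustrative, added; the arithmetic assumes DeepLab's usual
# output stride of 8 for this checkpoint): with TF-style "same" padding an
# H x H input yields ceil(H / stride) spatial positions, so a 513 x 513 image
# produces the 65 x 65 logit maps checked above.
import math

assert math.ceil(513 / 8) == 65  # matches torch.Size((1, 21, 65, 65))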
336
0