Dataset schema (column, type, observed range):

    code                     string   length 86 - 54.5k
    code_codestyle           int64    0 - 371
    style_context            string   length 87 - 49.2k
    style_context_codestyle  int64    0 - 349
    label                    int64    0 - 1

Each row below lists its values in schema order: code, code_codestyle, style_context, style_context_codestyle, label.
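For orientation, here is a minimal sketch of how rows with this schema could be loaded and inspected. It assumes the split has been exported to a local Parquet file; the file name "rows.parquet" is a hypothetical placeholder and is not part of the dataset itself:

```python
# Minimal sketch, assuming the rows above are available as a local Parquet
# file; "rows.parquet" is a placeholder name, not part of the dataset.
import pandas as pd

df = pd.read_parquet("rows.parquet")

expected = {"code", "code_codestyle", "style_context", "style_context_codestyle", "label"}
missing = expected - set(df.columns)
assert not missing, f"missing columns: {missing}"

# Each row pairs a `code` string with a `style_context` string; the two
# *_codestyle columns are integer style ids and `label` is a 0/1 flag.
for _, row in df.head(3).iterrows():
    print(
        len(row["code"]),
        row["code_codestyle"],
        len(row["style_context"]),
        row["style_context_codestyle"],
        row["label"],
    )
```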
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.g4dn.xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9}, }, { 'framework': 'tensorflow', 'script': 'run_tf.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.g4dn.xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9}, }, ] ) class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :Optional[Any] ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() ,encoding="""utf-8""" ,check=_UpperCamelCase ,) assert hasattr(self ,"""env""" ) def a__ ( self :Union[str, Any] ,_UpperCamelCase :Dict=1 ): # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F'''{self.env.base_job_name}-single''' ,instance_count=_UpperCamelCase ,instance_type=self.instance_type ,debugger_hook_config=_UpperCamelCase ,hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version="""py36""" ,) def a__ ( self :Union[str, Any] ,_UpperCamelCase :int ): TrainingJobAnalytics(_UpperCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) def a__ ( self :List[str] ): # create estimator snake_case_ : List[Any] = self.create_estimator() # run training estimator.fit() # result dataframe snake_case_ : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case_ : Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) snake_case_ : str = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case_ : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' ,"""w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,_UpperCamelCase )
code_codestyle: 8
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]


if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 8
label: 1
'''simple docstring''' import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :str ): return F'''gaussian_noise_s={seed}_shape={"_".join([str(_UpperCamelCase ) for s in shape] )}.npy''' def a__ ( self :Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any]=0 ,_UpperCamelCase :str=(4, 4, 6_4, 6_4) ,_UpperCamelCase :List[str]=False ): snake_case_ : List[str] = jnp.bfloataa if fpaa else jnp.floataa snake_case_ : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase ,_UpperCamelCase ) ) ,dtype=_UpperCamelCase ) return image def a__ ( self :Dict ,_UpperCamelCase :List[Any]=False ,_UpperCamelCase :Union[str, Any]="CompVis/stable-diffusion-v1-4" ): snake_case_ : Tuple = jnp.bfloataa if fpaa else jnp.floataa snake_case_ : Any = """bf16""" if fpaa else None snake_case_ , snake_case_ : Any = FlaxUNetaDConditionModel.from_pretrained( _UpperCamelCase ,subfolder="""unet""" ,dtype=_UpperCamelCase ,revision=_UpperCamelCase ) return model, params def a__ ( self :Union[str, Any] ,_UpperCamelCase :Dict=0 ,_UpperCamelCase :Tuple=(4, 7_7, 7_6_8) ,_UpperCamelCase :int=False ): snake_case_ : List[str] = jnp.bfloataa if fpaa else jnp.floataa snake_case_ : Dict = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase ,_UpperCamelCase ) ) ,dtype=_UpperCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]], [1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]], [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]], [3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]], # fmt: on ] ) def a__ ( self :Optional[int] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Any ): snake_case_ , snake_case_ : List[Any] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" ,fpaa=_UpperCamelCase ) snake_case_ : Optional[Any] = self.get_latents(_UpperCamelCase ,fpaa=_UpperCamelCase ) snake_case_ : Tuple = self.get_encoder_hidden_states(_UpperCamelCase ,fpaa=_UpperCamelCase ) snake_case_ : Optional[int] = model.apply( {"""params""": params} ,_UpperCamelCase ,jnp.array(_UpperCamelCase ,dtype=jnp.intaa ) ,encoder_hidden_states=_UpperCamelCase ,).sample assert sample.shape == latents.shape snake_case_ : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) ,dtype=jnp.floataa ) snake_case_ : int = jnp.array(_UpperCamelCase ,dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_UpperCamelCase ,_UpperCamelCase ,atol=1E-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]], [1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]], [8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]], [3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, 
-0.05_39]], # fmt: on ] ) def a__ ( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Optional[int] ): snake_case_ , snake_case_ : Optional[int] = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" ,fpaa=_UpperCamelCase ) snake_case_ : Tuple = self.get_latents(_UpperCamelCase ,shape=(4, 4, 9_6, 9_6) ,fpaa=_UpperCamelCase ) snake_case_ : Tuple = self.get_encoder_hidden_states(_UpperCamelCase ,shape=(4, 7_7, 1_0_2_4) ,fpaa=_UpperCamelCase ) snake_case_ : Union[str, Any] = model.apply( {"""params""": params} ,_UpperCamelCase ,jnp.array(_UpperCamelCase ,dtype=jnp.intaa ) ,encoder_hidden_states=_UpperCamelCase ,).sample assert sample.shape == latents.shape snake_case_ : str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) ,dtype=jnp.floataa ) snake_case_ : List[str] = jnp.array(_UpperCamelCase ,dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_UpperCamelCase ,_UpperCamelCase ,atol=1E-2 )
code_codestyle: 8
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
style_context_codestyle: 8
label: 1
'''simple docstring''' import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A : Optional[Any] = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self :List[Any] ,_UpperCamelCase :Tuple="</s>" ,_UpperCamelCase :str="<unk>" ,_UpperCamelCase :str="<pad>" ,_UpperCamelCase :List[str]=1_2_5 ,_UpperCamelCase :List[str]=None ,**_UpperCamelCase :str ,): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: snake_case_ : List[str] = [F'''<extra_id_{i}>''' for i in range(_UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ : Optional[int] = len(set(filter(lambda _UpperCamelCase : bool("""extra_id""" in str(_UpperCamelCase ) ) ,_UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) snake_case_ : List[str] = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else pad_token snake_case_ : Dict = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else eos_token snake_case_ : Optional[Any] = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else unk_token super().__init__( eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,extra_ids=_UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = extra_ids snake_case_ : str = 2**8 # utf is 8 bits # define special tokens dict snake_case_ : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } snake_case_ : List[Any] = len(self.special_tokens_encoder ) snake_case_ : List[Any] = len(_UpperCamelCase ) for i, token in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = self.vocab_size + i - n snake_case_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a__ ( self :Union[str, Any] ): return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ,_UpperCamelCase :bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase ,token_ids_a=_UpperCamelCase ,already_has_special_tokens=_UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_UpperCamelCase )) + [1] return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def a__ ( self :int ,_UpperCamelCase :List[int] ): if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def a__ ( self :Optional[Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Union[str, Any] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def a__ ( self :Optional[int] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : List[str] = self._add_eos_if_not_present(_UpperCamelCase ) if token_ids_a is None: return token_ids_a else: snake_case_ : Union[str, Any] = self._add_eos_if_not_present(_UpperCamelCase ) return token_ids_a + token_ids_a def a__ ( self :Optional[Any] ,_UpperCamelCase :str ): snake_case_ : List[str] = [chr(_UpperCamelCase ) for i in text.encode("""utf-8""" )] return tokens def a__ ( self :List[str] ,_UpperCamelCase :str ): if token in self.special_tokens_encoder: snake_case_ : List[str] = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: snake_case_ : Optional[Any] = self.added_tokens_encoder[token] elif len(_UpperCamelCase ) != 1: snake_case_ : Any = self.unk_token_id else: snake_case_ : int = ord(_UpperCamelCase ) + self._num_special_tokens return token_id def a__ ( self :List[str] ,_UpperCamelCase :List[Any] ): if index in self.special_tokens_decoder: snake_case_ : str = self.special_tokens_decoder[index] else: snake_case_ : Tuple = chr(index - self._num_special_tokens ) return token def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ : Optional[int] = B"""""" for token in tokens: if token in self.special_tokens_decoder: snake_case_ : Optional[Any] = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: snake_case_ : Dict = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: snake_case_ : List[Any] = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: snake_case_ : Optional[int] = token.encode("""utf-8""" ) else: snake_case_ : List[Any] = bytes([ord(_UpperCamelCase )] ) bstring += tok_string snake_case_ : Union[str, Any] = bstring.decode("""utf-8""" ,errors="""ignore""" ) return string def a__ ( self :Dict ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ): return ()
code_codestyle: 8
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(pattern.search(phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
style_context_codestyle: 8
label: 1
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model)
code_codestyle: 8
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
style_context_codestyle: 8
label: 1
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCamelCase : def __init__( self :Tuple ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[Any]=1_3 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :Tuple=2 ,_UpperCamelCase :List[Any]=3 ,_UpperCamelCase :int=True ,_UpperCamelCase :str=True ,_UpperCamelCase :Optional[Any]=3_2 ,_UpperCamelCase :Optional[int]=2 ,_UpperCamelCase :Union[str, Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :List[str]="gelu" ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :List[Any]=0.1 ,_UpperCamelCase :List[Any]=1_0 ,_UpperCamelCase :Optional[int]=0.02 ,_UpperCamelCase :Any=3 ,_UpperCamelCase :Union[str, Any]=0.6 ,_UpperCamelCase :Optional[Any]=None ,): snake_case_ : Optional[int] = parent snake_case_ : List[Any] = batch_size snake_case_ : Tuple = image_size snake_case_ : int = patch_size snake_case_ : List[Any] = num_channels snake_case_ : int = is_training snake_case_ : List[Any] = use_labels snake_case_ : List[str] = hidden_size snake_case_ : Any = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : List[str] = intermediate_size snake_case_ : Any = hidden_act snake_case_ : int = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : List[Any] = type_sequence_label_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : Optional[Any] = mask_ratio snake_case_ : Dict = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) snake_case_ : Dict = (image_size // patch_size) ** 2 snake_case_ : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def a__ ( self :str ): snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Any = None if self.use_labels: snake_case_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) snake_case_ : Dict = self.get_config() return config, pixel_values, labels def a__ ( self :Union[str, Any] ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCamelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def a__ ( self :Tuple ,_UpperCamelCase :List[str] 
,_UpperCamelCase :Any ,_UpperCamelCase :Dict ): snake_case_ : Optional[Any] = TFViTMAEModel(config=_UpperCamelCase ) snake_case_ : Tuple = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self :Any ,_UpperCamelCase :str ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Dict ): snake_case_ : str = TFViTMAEForPreTraining(_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,training=_UpperCamelCase ) # expected sequence length = num_patches snake_case_ : List[Any] = (self.image_size // self.patch_size) ** 2 snake_case_ : Union[str, Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images snake_case_ : Optional[Any] = 1 snake_case_ : int = TFViTMAEForPreTraining(_UpperCamelCase ) snake_case_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : Union[str, Any] = model(_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def a__ ( self :int ): snake_case_ : Tuple = self.prepare_config_and_inputs() ((snake_case_) , (snake_case_) , (snake_case_)) : Dict = config_and_inputs snake_case_ : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): lowercase : Optional[int] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () lowercase : int = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} lowercase : Union[str, Any] = False lowercase : Tuple = False lowercase : Optional[int] = False lowercase : List[str] = False def a__ ( self :List[str] ): snake_case_ : Dict = TFViTMAEModelTester(self ) snake_case_ : List[str] = ConfigTester(self ,config_class=_UpperCamelCase ,has_text_modality=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Optional[int] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def a__ ( self :List[str] ): pass def a__ ( self :Union[str, Any] ): snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Union[str, Any] = model_class(_UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) snake_case_ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase ,tf.keras.layers.Layer ) ) def a__ ( self :Tuple ): snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Any = model_class(_UpperCamelCase ) snake_case_ : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : List[Any] = [*signature.parameters.keys()] snake_case_ : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_UpperCamelCase ) def a__ ( self :Any ): snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :int ): snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase ) def a__ ( self 
:List[Any] ): # make the mask reproducible np.random.seed(2 ) snake_case_ , snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[int] = model_class(_UpperCamelCase ) snake_case_ : int = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,noise=_UpperCamelCase ) snake_case_ : List[Any] = copy.deepcopy(self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) ) snake_case_ : Dict = model(**_UpperCamelCase ,noise=_UpperCamelCase ) snake_case_ : Optional[Any] = outputs_dict[0].numpy() snake_case_ : Dict = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def a__ ( self :Optional[int] ): # make the mask reproducible np.random.seed(2 ) snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_UpperCamelCase :Tuple ): snake_case_ : Union[str, Any] = {} for k, v in inputs_dict.items(): if tf.is_tensor(_UpperCamelCase ): snake_case_ : Any = v.numpy() else: snake_case_ : int = np.array(_UpperCamelCase ) return inputs_np_dict for model_class in self.all_model_classes: snake_case_ : List[str] = model_class(_UpperCamelCase ) snake_case_ : Tuple = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : str = prepare_numpy_arrays(_UpperCamelCase ) snake_case_ : Dict = model(_UpperCamelCase ,noise=_UpperCamelCase ) snake_case_ : int = model(**_UpperCamelCase ,noise=_UpperCamelCase ) self.assert_outputs_same(_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ): # make masks reproducible np.random.seed(2 ) snake_case_ : int = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : int = tf.constant(_UpperCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument snake_case_ : Dict = tf_noise super().check_pt_tf_models(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :Dict ): # make mask reproducible np.random.seed(2 ) snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_UpperCamelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_UpperCamelCase ,_UpperCamelCase ),) if isinstance(_UpperCamelCase ,_UpperCamelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_UpperCamelCase ,"""_keras_serializable""" ,_UpperCamelCase ) } snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Any = tf.convert_to_tensor(_UpperCamelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: snake_case_ : Dict = main_layer_class(_UpperCamelCase ) snake_case_ : Union[str, Any] = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } snake_case_ : Any = tf.keras.Model(_UpperCamelCase ,outputs=main_layer(_UpperCamelCase ) ) snake_case_ : List[Any] = model(_UpperCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[int] = os.path.join(_UpperCamelCase ,"""keras_model.h5""" ) model.save(_UpperCamelCase ) snake_case_ : Optional[int] = tf.keras.models.load_model( _UpperCamelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_UpperCamelCase ,tf.keras.Model ) snake_case_ : Tuple = model(_UpperCamelCase ) self.assert_outputs_same(_UpperCamelCase ,_UpperCamelCase ) @slow def a__ ( self :Dict ): # make mask reproducible np.random.seed(2 ) snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : int = model_class(_UpperCamelCase ) snake_case_ : List[Any] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,noise=_UpperCamelCase ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : List[Any] = outputs.last_hidden_state.numpy() snake_case_ : Optional[Any] = 0 else: snake_case_ : int = outputs.logits.numpy() snake_case_ : Dict = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCamelCase ,saved_model=_UpperCamelCase ) snake_case_ : int = model_class.from_pretrained(_UpperCamelCase ) snake_case_ : Union[str, Any] = model(_UpperCamelCase ,noise=_UpperCamelCase ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Optional[int] = after_outputs["""last_hidden_state"""].numpy() snake_case_ : List[Any] = 0 else: snake_case_ : Any = after_outputs["""logits"""].numpy() snake_case_ : Any = 0 snake_case_ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_UpperCamelCase ,1E-5 ) def a__ ( self :Union[str, Any] ): # make mask reproducible np.random.seed(2 ) snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : str = model_class(_UpperCamelCase ) snake_case_ : Dict = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,noise=_UpperCamelCase ) snake_case_ : List[Any] = model.get_config() # make sure that returned config is jsonifiable, which is required by keras 
json.dumps(_UpperCamelCase ) snake_case_ : Optional[Any] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config snake_case_ : str = model_class.from_config(model.config ) snake_case_ : List[str] = new_model(_UpperCamelCase ) # Build model new_model.set_weights(model.get_weights() ) snake_case_ : Optional[Any] = new_model(_UpperCamelCase ,noise=_UpperCamelCase ) self.assert_outputs_same(_UpperCamelCase ,_UpperCamelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def a__ ( self :Dict ): pass @slow def a__ ( self :int ): snake_case_ : List[str] = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_UpperCamelCase ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def a__ ( self :List[str] ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def a__ ( self :Union[str, Any] ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) snake_case_ : int = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) snake_case_ : Any = self.default_image_processor snake_case_ : str = prepare_img() snake_case_ : Dict = image_processor(images=_UpperCamelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) snake_case_ : Any = ViTMAEConfig() snake_case_ : Optional[int] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) snake_case_ : Any = np.random.uniform(size=(1, num_patches) ) # forward pass snake_case_ : Optional[Any] = model(**_UpperCamelCase ,noise=_UpperCamelCase ) # verify the logits snake_case_ : Optional[Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape ,_UpperCamelCase ) snake_case_ : Any = tf.convert_to_tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_UpperCamelCase ,atol=1E-4 )
code_codestyle: 8
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): lowercase : Dict = StableDiffusionInpaintPipeline lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase : Optional[int] = frozenset([] ) def a__ ( self :Any ): torch.manual_seed(0 ) snake_case_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,) snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) torch.manual_seed(0 ) snake_case_ : List[str] = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) snake_case_ : Optional[int] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,) snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase ) snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0] snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) ) snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) ) if str(_UpperCamelCase ).startswith("""mps""" ): snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase ) else: snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ : int = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": 
mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def a__ ( self :Any ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Optional[Any] = self.get_dummy_components() snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase ) snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ ( self :Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self :Tuple ): snake_case_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Dict = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : Union[str, Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a__ ( self :Tuple ): snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[Any] = torch.manual_seed(0 ) snake_case_ : Any = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase 
,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : List[str] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a__ ( self :Union[str, Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" ) snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : Tuple = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,) snake_case_ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
style_context_codestyle: 8
label: 1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 8
'''simple docstring''' import collections import os import re from pathlib import Path __A : Dict = 'src/transformers' # Matches is_xxx_available() __A : Dict = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __A : int = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __A : List[Any] = re.compile(r'^\s*try:') # Catches a line with else: __A : Any = re.compile(r'^\s*else:') def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase_ ) is None: return None snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )] backends.sort() return "_and_".join(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : List[Any] = 0 while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : Union[str, Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase_ ): snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0] snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ ) if single_line_import_search is not None: snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : Union[str, Any] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : List[Any] = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None: snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_between_brackets.search(lowerCamelCase_ ) is not None: snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_quote_object.search(lowerCamelCase_ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : List[Any] = [] while ( line_index < len(lowerCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : Union[str, Any] = lines[line_index] snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
snake_case_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : Dict = lines[line_index] snake_case_ : Any = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ): '''simple docstring''' def find_duplicates(lowerCamelCase_ :Union[str, Any] ): return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Optional[int] = [] for key in import_dict_objects.keys(): snake_case_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = [] for root, _, files in os.walk(lowerCamelCase_ ): if "__init__.py" in files: snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" ) snake_case_ : Dict = parse_init(lowerCamelCase_ ) if objects is not None: snake_case_ : Any = analyze_results(*lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCamelCase_ ) ) if len(lowerCamelCase_ ) > 0: raise ValueError("""\n\n""".join(lowerCamelCase_ ) ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = [] for path, directories, files in os.walk(lowerCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCamelCase_ ) for fname in files: if fname == "__init__.py": continue snake_case_ : Dict = 
str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCamelCase_ ) return submodules __A : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def UpperCAmelCase ( ): '''simple docstring''' # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f: snake_case_ : str = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) ) snake_case_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
style_context_codestyle: 8
label: 1
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape 
,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
8
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Any=1_8 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :List[str]=4_0_0 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=True ,): snake_case_ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case_ : Union[str, Any] = parent snake_case_ : str = batch_size snake_case_ : List[Any] = num_channels snake_case_ : Tuple = image_size snake_case_ : int = min_resolution snake_case_ : int = max_resolution snake_case_ : Union[str, Any] = do_resize snake_case_ : Optional[Any] = size snake_case_ : Any = apply_ocr def a__ ( self :Union[str, Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None def a__ ( self :List[Any] ): snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self ) @property def a__ ( self :int ): return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self :Any ): snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) ) def a__ ( self :int ): snake_case_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} ) snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ) self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} ) def a__ ( self :Optional[Any] ): pass def a__ ( self :Union[str, Any] ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,Image.Image ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) self.assertIsInstance(encoding.words ,_UpperCamelCase ) self.assertIsInstance(encoding.boxes ,_UpperCamelCase ) # Test batched snake_case_ : List[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], 
self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Tuple ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,np.ndarray ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Any = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Optional[Any] ): # Initialize image_processing snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Union[str, Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :List[Any] ): # with apply_OCR = True snake_case_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" ) snake_case_ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) snake_case_ : Dict = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", 
"""A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 
3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words ,_UpperCamelCase ) self.assertListEqual(encoding.boxes ,_UpperCamelCase ) # with apply_OCR = False snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase ) snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
8
1
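The BLIP text-model tester earlier in this record builds random attention masks by keeping, per batch row, a random-length prefix of ones. A small sketch of that trick; the slice-assignment targets were mangled by the identifier masking above, so this is my reconstruction, not the verbatim source.

import numpy as np

batch_size, seq_length = 4, 12
rng = np.random.default_rng(seed=0)
mask = np.ones((batch_size, seq_length), dtype=np.int64)
# One split point per row; everything from that point on is masked out,
# so each row keeps at least one visible token.
start_indices = rng.integers(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(start_indices):
    mask[batch_idx, start_index:] = 0
print(mask.sum(axis=1))  # per-row prefix lengths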
'''simple docstring''' from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def UpperCAmelCase ( lowerCamelCase_ :Namespace ): '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) __A : Optional[Any] = '\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class __UpperCamelCase ( lowercase__ ): @staticmethod def a__ ( _UpperCamelCase :ArgumentParser ): snake_case_ : Optional[int] = parser.add_parser( """convert""" ,help="""CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.""" ,) train_parser.add_argument("""--model_type""" ,type=_UpperCamelCase ,required=_UpperCamelCase ,help="""Model's type.""" ) train_parser.add_argument( """--tf_checkpoint""" ,type=_UpperCamelCase ,required=_UpperCamelCase ,help="""TensorFlow checkpoint path or folder.""" ) train_parser.add_argument( """--pytorch_dump_output""" ,type=_UpperCamelCase ,required=_UpperCamelCase ,help="""Path to the PyTorch saved model output.""" ) train_parser.add_argument("""--config""" ,type=_UpperCamelCase ,default="""""" ,help="""Configuration file path or folder.""" ) train_parser.add_argument( """--finetuning_task_name""" ,type=_UpperCamelCase ,default=_UpperCamelCase ,help="""Optional fine-tuning task name if the TF model was a finetuned model.""" ,) train_parser.set_defaults(func=_UpperCamelCase ) def __init__( self :str ,_UpperCamelCase :str ,_UpperCamelCase :str ,_UpperCamelCase :str ,_UpperCamelCase :str ,_UpperCamelCase :str ,*_UpperCamelCase :Any ,): snake_case_ : Dict = logging.get_logger("""transformers-cli/converting""" ) self._logger.info(F'''Loading model {model_type}''' ) snake_case_ : str = model_type snake_case_ : Dict = tf_checkpoint snake_case_ : List[Any] = pytorch_dump_output snake_case_ : int = config snake_case_ : int = finetuning_task_name def a__ ( self :Tuple ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, )
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) if "ckpt" in self._tf_checkpoint.lower(): snake_case_ : Optional[int] = self._tf_checkpoint snake_case_ : List[str] = """""" else: snake_case_ : Any = self._tf_checkpoint snake_case_ : Tuple = """""" convert_transfo_xl_checkpoint_to_pytorch( _UpperCamelCase ,self._config ,self._pytorch_dump_output ,_UpperCamelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) else: raise ValueError( """--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
8
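A hedged sketch of the dispatch pattern the convert command above uses: import the model-specific converter lazily so a missing TensorFlow only fails for the checkpoints that need it. `run_convert` is a hypothetical name of mine, and only the bert branch is shown.

def run_convert(model_type: str, tf_checkpoint: str, config: str, dump_path: str) -> None:
    if model_type == "bert":
        try:
            # Lazy import: only attempted when a bert checkpoint is converted.
            from transformers.models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                convert_tf_checkpoint_to_pytorch,
            )
        except ImportError:
            raise ImportError("TensorFlow is required to convert TF checkpoints.")
        convert_tf_checkpoint_to_pytorch(tf_checkpoint, config, dump_path)
    else:
        raise ValueError(f"unsupported model_type: {model_type}")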
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : List[Any] = generate_pascal_triangle(lowerCamelCase_ ) for row_idx in range(lowerCamelCase_ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [] for current_row_idx in range(lowerCamelCase_ ): snake_case_ : List[str] = populate_current_row(lowerCamelCase_ , lowerCamelCase_ ) triangle.append(lowerCamelCase_ ) return triangle def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Union[str, Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 snake_case_ , snake_case_ : Optional[Any] = 1, 1 for current_col_idx in range(1 , lowerCamelCase_ ): calculate_current_element( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return current_row def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :list[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ): '''simple docstring''' snake_case_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1] snake_case_ : List[Any] = triangle[current_row_idx - 1][current_col_idx] snake_case_ : Optional[int] = above_to_left_elt + above_to_right_elt def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [[1]] for row_index in range(1 , lowerCamelCase_ ): snake_case_ : Optional[Any] = [0] + result[-1] + [0] snake_case_ : Dict = row_index + 1 # Calculate the number of distinct elements in a row snake_case_ : Any = sum(divmod(lowerCamelCase_ , 2 ) ) snake_case_ : Tuple = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] snake_case_ : Optional[int] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() snake_case_ : str = row_first_half + row_second_half result.append(lowerCamelCase_ ) return result def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCamelCase_ :Callable , lowerCamelCase_ :int ) -> None: snake_case_ : Dict = F'''{func.__name__}({value})''' snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowerCamelCase_ , lowerCamelCase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
8
1
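A de-obfuscated sketch of the triangle construction in the cell above (my reconstruction of the masked assignment targets): every interior entry is the sum of the two entries above it, and the row edges are 1.

def pascal_triangle(num_rows: int) -> list[list[int]]:
    triangle: list[list[int]] = []
    for row_idx in range(num_rows):
        row = [1] * (row_idx + 1)  # edges (and rows 0 and 1) are all ones
        for col in range(1, row_idx):
            # Interior entry: sum of the two entries above it.
            row[col] = triangle[row_idx - 1][col - 1] + triangle[row_idx - 1][col]
        triangle.append(row)
    return triangle

print(pascal_triangle(5))  # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]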
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowercase__ ): lowercase : Union[str, Any] = ['flax', 'transformers'] def __init__( self :Dict ,*_UpperCamelCase :Any ,**_UpperCamelCase :str ): requires_backends(self ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Union[str, Any] ,*_UpperCamelCase :int ,**_UpperCamelCase :int ): requires_backends(cls ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :List[Any] ,*_UpperCamelCase :Union[str, Any] ,**_UpperCamelCase :Any ): requires_backends(cls ,["""flax""", """transformers"""] ) class __UpperCamelCase ( metaclass=lowercase__ ): lowercase : int = ['flax', 'transformers'] def __init__( self :Optional[int] ,*_UpperCamelCase :List[Any] ,**_UpperCamelCase :int ): requires_backends(self ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Tuple ,*_UpperCamelCase :Tuple ,**_UpperCamelCase :List[str] ): requires_backends(cls ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :int ,*_UpperCamelCase :List[str] ,**_UpperCamelCase :Optional[Any] ): requires_backends(cls ,["""flax""", """transformers"""] ) class __UpperCamelCase ( metaclass=lowercase__ ): lowercase : Union[str, Any] = ['flax', 'transformers'] def __init__( self :Tuple ,*_UpperCamelCase :Optional[int] ,**_UpperCamelCase :Union[str, Any] ): requires_backends(self ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :int ,*_UpperCamelCase :Any ,**_UpperCamelCase :str ): requires_backends(cls ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Tuple ,*_UpperCamelCase :Union[str, Any] ,**_UpperCamelCase :Any ): requires_backends(cls ,["""flax""", """transformers"""] ) class __UpperCamelCase ( metaclass=lowercase__ ): lowercase : Dict = ['flax', 'transformers'] def __init__( self :List[Any] ,*_UpperCamelCase :Any ,**_UpperCamelCase :Any ): requires_backends(self ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Any ,*_UpperCamelCase :str ,**_UpperCamelCase :Any ): requires_backends(cls ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Optional[Any] ,*_UpperCamelCase :List[Any] ,**_UpperCamelCase :Union[str, Any] ): requires_backends(cls ,["""flax""", """transformers"""] )
8
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Dict ): snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Dict = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) ) @slow def a__ ( self :Union[str, Any] ): snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Any = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
8
1
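The cell two rows up is transformers' dummy-object pattern: placeholder classes that fail fast when optional backends are absent. A minimal self-contained sketch; the class name is hypothetical, and the real code routes the error through a shared `requires_backends` helper rather than raising directly.

class DummyFlaxModel:
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        # Constructing the placeholder immediately reports the missing backends.
        raise ImportError(f"{type(self).__name__} requires {self._backends} to be installed.")

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Classmethod entry points fail the same way, without a model instance.
        raise ImportError(f"{cls.__name__} requires {cls._backends} to be installed.")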
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ): '''simple docstring''' # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : Optional[Any] = np.asarray(weights[0] ) snake_case_ : int = np.asarray(weights[1] ) snake_case_ : Any = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : List[Any] = np.asarray(weights[0] ) snake_case_ : Optional[int] = np.asarray(weights[1] ) snake_case_ : Union[str, Any] = np.asarray(weights[2] ) snake_case_ : int = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' # layernorm 1 snake_case_ : str = weights[0][0][0] snake_case_ : int = np.asarray(layer_norm_a[0] ) snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # lsh weights + output snake_case_ : Tuple = weights[0][1] if len(lowerCamelCase_ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) else: set_layer_weights_in_torch_local(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) # intermediate weights snake_case_ : str = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase_ ) == 4: snake_case_ : List[Any] = intermediate_weights[2] # layernorm 2 snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] ) snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # intermediate dense snake_case_ : Any =
np.asarray(intermediate_weights[1][0] ) snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) # intermediate out snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] ) snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ): '''simple docstring''' # reformer model snake_case_ : Dict = torch_model.reformer # word embeds snake_case_ : List[Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase_ ) , ) if isinstance(weights[3] , lowerCamelCase_ ): snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) ) snake_case_ : List[Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # output layer norm snake_case_ : Optional[Any] = np.asarray(weights[7][0] ) snake_case_ : List[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # output embeddings snake_case_ : Optional[int] = np.asarray(weights[9][0] ) snake_case_ : Any = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # Initialise PyTorch model snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ ) with open(lowerCamelCase_ , """rb""" ) as f: snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""] set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCamelCase_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) __A : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
8
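The conversion script above relies on a small weight-copy helper whose assignment targets were mangled by the identifier masking. A hedged reconstruction of that helper: assert the shapes line up, then wrap the converted tensors in nn.Parameter.

import numpy as np
import torch
from torch import nn

def set_param(torch_layer: nn.Module, weight: torch.Tensor, bias: torch.Tensor = None) -> None:
    # Shape check first, so a mismatched checkpoint fails loudly.
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)

# Usage: copy an identity matrix and zero bias into a fresh linear layer.
layer = nn.Linear(4, 4)
set_param(layer, torch.tensor(np.eye(4, dtype=np.float32)), torch.zeros(4))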
'''simple docstring''' from __future__ import annotations from collections.abc import Callable def UpperCAmelCase ( lowerCamelCase_ :Callable[[int | float], int | float] , lowerCamelCase_ :int | float , lowerCamelCase_ :int | float , lowerCamelCase_ :int = 1_00 , ): '''simple docstring''' snake_case_ : Tuple = x_start snake_case_ : Optional[int] = fnc(lowerCamelCase_ ) snake_case_ : Optional[int] = 0.0 for _ in range(lowerCamelCase_ ): # Approximates small segments of the curve as linear and solves # for the trapezoidal area snake_case_ : int = (x_end - x_start) / steps + xa snake_case_ : Union[str, Any] = fnc(lowerCamelCase_ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step snake_case_ : Any = xa snake_case_ : str = fxa return area if __name__ == "__main__": def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return x**3 + x**2 print('f(x) = x^3 + x^2') print('The area between the curve, x = -5, x = 5 and the x axis is:') __A : List[str] = 10 while i <= 100_000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
8
1
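A de-obfuscated sketch of the integrator in the cell above (my reconstruction of the masked variable names): walk the interval in fixed steps and accumulate trapezoid areas.

from collections.abc import Callable

def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    xa, fxa = x_start, fnc(x_start)
    area = 0.0
    for _ in range(steps):
        xb = (x_end - x_start) / steps + xa  # advance one step
        fxb = fnc(xb)
        area += abs(fxa + fxb) * (xb - xa) / 2  # trapezoid over [xa, xb]
        xa, fxa = xb, fxb  # slide the window forward
    return area

print(trapezoidal_area(lambda x: x * x, 0.0, 1.0, 1_000))  # ~0.33333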
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Dict ): snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Dict = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) ) @slow def a__ ( self :Union[str, Any] ): snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Any = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
8
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) __A : int = logging.getLogger() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) snake_case_ : int = parser.parse_args() return args.f def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[Any] = {} snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" ) if os.path.exists(lowerCamelCase_ ): with open(lowerCamelCase_ , """r""" ) as f: snake_case_ : str = json.load(lowerCamelCase_ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda""" return is_using_cuda and is_apex_available() __A : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __UpperCamelCase ( lowercase__ ): @classmethod def a__ ( cls :Dict ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU snake_case_ : Optional[int] = tempfile.mkdtemp() snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" ) write_basic_config(save_location=cls.configPath ) snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def a__ ( cls :int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Optional[int] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : Dict = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2 snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : str = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertLess(result["""train_loss"""] ,0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) ) @unittest.skip(reason="""Fix me @muellerzr""" ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[str] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : Optional[int] = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 ) self.assertGreaterEqual(result["""eval_exact"""] ,2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Union[str, Any] = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Union[str, Any] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[Any] = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : int = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 ) self.assertGreaterEqual(result["""eval_rouge2"""] ,2 ) self.assertGreaterEqual(result["""eval_rougeL"""] ,7 ) self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : Tuple = self.get_auto_remove_tmp_dir() snake_case_ : Optional[Any] = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Any = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) ) @slow def a__ ( self :Optional[Any] ): snake_case_ : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCamelCase ) snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Any ): snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
8
1
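A hedged reconstruction of the result-loading helper in the test harness above (its parameter names were masked): each example script writes an `all_results.json` into its output directory, and the tests read the metrics back from it.

import json
import os

def get_results(output_dir: str) -> dict:
    # The no_trainer example scripts dump their final metrics here.
    path = os.path.join(output_dir, "all_results.json")
    if not os.path.exists(path):
        raise ValueError(f"can't find {path}")
    with open(path, "r") as f:
        return json.load(f)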
'''simple docstring''' import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Any = CpmAntTokenizer lowercase : Tuple = False def a__ ( self :Optional[int] ): super().setUp() snake_case_ : List[Any] = [ """<d>""", """</d>""", """<s>""", """</s>""", """</_>""", """<unk>""", """<pad>""", """</n>""", """我""", """是""", """C""", """P""", """M""", """A""", """n""", """t""", ] snake_case_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) @tooslow def a__ ( self :str ): snake_case_ : Dict = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" ) snake_case_ : Optional[int] = """今天天气真好!""" snake_case_ : Optional[int] = ["""今天""", """天气""", """真""", """好""", """!"""] snake_case_ : Optional[Any] = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Optional[Any] = """今天天气真好!""" snake_case_ : Any = [tokenizer.bos_token] + tokens snake_case_ : Dict = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) ,_UpperCamelCase ) snake_case_ : Optional[Any] = tokenizer.decode(_UpperCamelCase ) self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
8
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A : Tuple = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : str = ['input_values', 'padding_mask'] def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,): super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = chunk_length_s snake_case_ : str = overlap @property def a__ ( self :Any ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def a__ ( self :List[str] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. 
Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs snake_case_ : Tuple = True snake_case_ : str = bool( isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ): snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa ) elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): snake_case_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_UpperCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) snake_case_ : Tuple = None snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio ) snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) ) snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: snake_case_ : Any = max(array.shape[0] for array in raw_audio ) snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) ) snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length snake_case_ : Union[str, Any] = """max_length""" else: snake_case_ : int = input_values # normal padding on batch if padded_inputs is None: snake_case_ : Optional[int] = self.pad( _UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,) if padding: snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" ) snake_case_ : Optional[int] = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: snake_case_ : Dict = example[..., None] input_values.append(example.T ) snake_case_ : List[Any] = input_values if return_tensors is not None: snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase ) return padded_inputs
8
1
'''simple docstring'''

import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


__A : str = logging.get_logger(__name__)

__A : str = {
    'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class __UpperCamelCase ( lowercase__ ):
    lowercase : Union[str, Any] = 'detr'
    lowercase : Dict = ['past_key_values']
    lowercase : Tuple = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__( self :List[Any] ,_UpperCamelCase :int=True ,_UpperCamelCase :Tuple=None ,_UpperCamelCase :str=3 ,_UpperCamelCase :Tuple=1_0_0 ,_UpperCamelCase :Union[str, Any]=6 ,_UpperCamelCase :Union[str, Any]=2_0_4_8 ,_UpperCamelCase :int=8 ,_UpperCamelCase :List[Any]=6 ,_UpperCamelCase :Union[str, Any]=2_0_4_8 ,_UpperCamelCase :int=8 ,_UpperCamelCase :List[Any]=0.0 ,_UpperCamelCase :Optional[int]=0.0 ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Union[str, Any]="relu" ,_UpperCamelCase :Optional[Any]=2_5_6 ,_UpperCamelCase :Optional[Any]=0.1 ,_UpperCamelCase :Union[str, Any]=0.0 ,_UpperCamelCase :Optional[Any]=0.0 ,_UpperCamelCase :Union[str, Any]=0.02 ,_UpperCamelCase :Tuple=1.0 ,_UpperCamelCase :List[str]=False ,_UpperCamelCase :Any="sine" ,_UpperCamelCase :Tuple="resnet50" ,_UpperCamelCase :int=True ,_UpperCamelCase :Any=False ,_UpperCamelCase :Dict=1 ,_UpperCamelCase :Union[str, Any]=5 ,_UpperCamelCase :Optional[Any]=2 ,_UpperCamelCase :List[Any]=1 ,_UpperCamelCase :Optional[int]=1 ,_UpperCamelCase :Dict=5 ,_UpperCamelCase :List[Any]=2 ,_UpperCamelCase :List[Any]=0.1 ,**_UpperCamelCase :Dict ,):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                snake_case_ : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(_UpperCamelCase ,_UpperCamelCase ):
                snake_case_ : Any = backbone_config.get("""model_type""" )
                snake_case_ : Tuple = CONFIG_MAPPING[backbone_model_type]
                snake_case_ : List[Any] = config_class.from_dict(_UpperCamelCase )
            # set timm attributes to None
            snake_case_ , snake_case_ , snake_case_ : List[str] = None, None, None

        snake_case_ : Optional[int] = use_timm_backbone
        snake_case_ : Optional[int] = backbone_config
        snake_case_ : Dict = num_channels
        snake_case_ : Optional[int] = num_queries
        snake_case_ : Union[str, Any] = d_model
        snake_case_ : int = encoder_ffn_dim
        snake_case_ : Optional[int] = encoder_layers
        snake_case_ : List[Any] = encoder_attention_heads
        snake_case_ : List[Any] = decoder_ffn_dim
        snake_case_ : Optional[int] = decoder_layers
        snake_case_ : List[Any] = decoder_attention_heads
        snake_case_ : Optional[int] = dropout
        snake_case_ : List[str] = attention_dropout
        snake_case_ : Dict = activation_dropout
        snake_case_ : List[str] = activation_function
        snake_case_ : Any = init_std
        snake_case_ : int = init_xavier_std
        snake_case_ : Dict = encoder_layerdrop
        snake_case_ : Tuple = decoder_layerdrop
        snake_case_ : Optional[int] = encoder_layers
        snake_case_ : Union[str, Any] = auxiliary_loss
        snake_case_ : List[str] = position_embedding_type
        snake_case_ : str = backbone
        snake_case_ : int = use_pretrained_backbone
        snake_case_ : Dict = dilation
        # Hungarian matcher
        snake_case_ : List[Any] = class_cost
        snake_case_ : Any = bbox_cost
        snake_case_ : Tuple = giou_cost
        # Loss coefficients
        snake_case_ : Optional[int] = mask_loss_coefficient
        snake_case_ : str = dice_loss_coefficient
        snake_case_ : List[Any] = bbox_loss_coefficient
        snake_case_ : Tuple = giou_loss_coefficient
        snake_case_ : Any = eos_coefficient
        super().__init__(is_encoder_decoder=_UpperCamelCase ,**_UpperCamelCase )

    @property
    def a__ ( self :Union[str, Any] ):
        return self.encoder_attention_heads

    @property
    def a__ ( self :int ):
        return self.d_model

    @classmethod
    def a__ ( cls :int ,_UpperCamelCase :PretrainedConfig ,**_UpperCamelCase :Any ):
        return cls(backbone_config=_UpperCamelCase ,**_UpperCamelCase )

    def a__ ( self :int ):
        snake_case_ : Tuple = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            snake_case_ : Optional[Any] = self.backbone_config.to_dict()
        snake_case_ : Dict = self.__class__.model_type
        return output


class __UpperCamelCase ( lowercase__ ):
    lowercase : Tuple = version.parse('1.11' )

    @property
    def a__ ( self :int ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )

    @property
    def a__ ( self :Union[str, Any] ):
        return 1E-5

    @property
    def a__ ( self :Optional[int] ):
        return 1_2
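This row mirrors transformers' DetrConfig (model_type 'detr', the d_model/encoder_attention_heads attribute map). Assuming that mapping, a short usage sketch of the config round-trip shown above; the de-obfuscated class name is an assumption:

from transformers import DetrConfig

config = DetrConfig(num_queries=50)  # override one default; others keep the values above
assert config.hidden_size == config.d_model  # the attribute_map aliasing defined above
config_dict = config.to_dict()  # the nested backbone config is serialized as a dict too
print(config_dict["backbone"], config_dict["num_queries"])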
8
'''simple docstring'''

from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


__A : Dict = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}


class __UpperCamelCase ( lowercase__ ):
    lowercase : Optional[int] = 'ernie_m'
    lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__( self :Optional[Any] ,_UpperCamelCase :int = 2_5_0_0_0_2 ,_UpperCamelCase :int = 7_6_8 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 3_0_7_2 ,_UpperCamelCase :str = "gelu" ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :int = 5_1_4 ,_UpperCamelCase :float = 0.02 ,_UpperCamelCase :int = 1 ,_UpperCamelCase :float = 1E-0_5 ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :List[str]=False ,_UpperCamelCase :Optional[int]=0.0 ,**_UpperCamelCase :List[Any] ,):
        super().__init__(pad_token_id=_UpperCamelCase ,**_UpperCamelCase )
        snake_case_ : Optional[int] = vocab_size
        snake_case_ : Any = hidden_size
        snake_case_ : Union[str, Any] = num_hidden_layers
        snake_case_ : Union[str, Any] = num_attention_heads
        snake_case_ : Any = intermediate_size
        snake_case_ : Any = hidden_act
        snake_case_ : Tuple = hidden_dropout_prob
        snake_case_ : Union[str, Any] = attention_probs_dropout_prob
        snake_case_ : str = max_position_embeddings
        snake_case_ : int = initializer_range
        snake_case_ : Optional[Any] = layer_norm_eps
        snake_case_ : Union[str, Any] = classifier_dropout
        snake_case_ : Tuple = is_decoder
        snake_case_ : int = act_dropout
8
1
'''simple docstring'''

from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def UpperCAmelCase ( lowerCamelCase_ :str = "isbn/0140328726" ):
    '''simple docstring'''
    snake_case_ : Tuple = olid.strip().strip("""/""" )  # Remove leading/trailing whitespace & slashes
    if new_olid.count("""/""" ) != 1:
        snake_case_ : int = F'''{olid} is not a valid Open Library olid'''
        raise ValueError(lowerCamelCase_ )
    return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()


def UpperCAmelCase ( lowerCamelCase_ :dict ):
    '''simple docstring'''
    snake_case_ : List[str] = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    snake_case_ : Optional[int] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    snake_case_ : List[Any] = [
        get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
    ]
    snake_case_ : Optional[int] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            snake_case_ : str = """, """.join(lowerCamelCase_ )
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        __A : Any = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
            continue
        print(F'\nSearching Open Library for ISBN: {isbn}...\n')

        try:
            __A : Any = summarize_book(get_openlibrary_data(F'isbn/{isbn}'))
            print('\n'.join(F'{key}: {value}' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(F'Sorry, there are no results for ISBN: {isbn}.')
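A non-interactive usage sketch of the two helpers above, using the names the sample's own __main__ block calls (get_openlibrary_data / summarize_book); it needs network access, and the book behind the sample's default ISBN is an assumption:

# Requires network access to openlibrary.org.
book = get_openlibrary_data("isbn/0140328726")  # the default ISBN used above
summary = summarize_book(book)
for key, value in summary.items():
    print(f"{key}: {value}")  # Title, Publish date, Authors, ...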
8
'''simple docstring'''

from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class __UpperCamelCase ( nn.Module ):
    def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,):
        super().__init__()
        snake_case_ : Any = only_cross_attention
        snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
        snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase )
        elif self.use_ada_layer_norm_zero:
            snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase )
        else:
            snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
        snake_case_ : List[str] = Attention(
            query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,)

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            snake_case_ : str = (
                AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase )
                if self.use_ada_layer_norm
                else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
            )
            snake_case_ : List[str] = Attention(
                query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,)  # is self-attn if encoder_hidden_states is none
        else:
            snake_case_ : Any = None
            snake_case_ : Optional[Any] = None

        # 3. Feed-forward
        snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
        snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase )

        # let chunk size default to None
        snake_case_ : Optional[int] = None
        snake_case_ : Dict = 0

    def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ):
        # Sets chunk feed-forward
        snake_case_ : Optional[Any] = chunk_size
        snake_case_ : Optional[Any] = dim

    def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase )
        elif self.use_ada_layer_norm_zero:
            snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma(
                _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype )
        else:
            snake_case_ : Optional[int] = self.norma(_UpperCamelCase )

        snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        snake_case_ : Union[str, Any] = self.attna(
            _UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,)
        if self.use_ada_layer_norm_zero:
            snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output
        snake_case_ : Union[str, Any] = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attna is not None:
            snake_case_ : Any = (
                self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase )
            )
            snake_case_ : List[Any] = self.attna(
                _UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,)
            snake_case_ : Tuple = attn_output + hidden_states

        # 3. Feed-forward
        snake_case_ : Optional[Any] = self.norma(_UpperCamelCase )

        if self.use_ada_layer_norm_zero:
            snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )

            snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            snake_case_ : int = torch.cat(
                [self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
        else:
            snake_case_ : List[str] = self.ff(_UpperCamelCase )

        if self.use_ada_layer_norm_zero:
            snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output

        snake_case_ : Any = ff_output + hidden_states

        return hidden_states


class __UpperCamelCase ( nn.Module ):
    def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,):
        super().__init__()
        snake_case_ : Tuple = int(dim * mult )
        snake_case_ : Optional[int] = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase )
        if activation_fn == "gelu-approximate":
            snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" )
        elif activation_fn == "geglu":
            snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase )
        elif activation_fn == "geglu-approximate":
            snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase )

        snake_case_ : Dict = nn.ModuleList([] )
        # project in
        self.net.append(_UpperCamelCase )
        # project dropout
        self.net.append(nn.Dropout(_UpperCamelCase ) )
        # project out
        self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(_UpperCamelCase ) )

    def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ):
        for module in self.net:
            snake_case_ : Tuple = module(_UpperCamelCase )
        return hidden_states


class __UpperCamelCase ( nn.Module ):
    def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ):
        super().__init__()
        snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase )
        snake_case_ : Optional[Any] = approximate

    def a__ ( self :str ,_UpperCamelCase :int ):
        if gate.device.type != "mps":
            return F.gelu(_UpperCamelCase ,approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype )

    def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ):
        snake_case_ : Optional[Any] = self.proj(_UpperCamelCase )
        snake_case_ : int = self.gelu(_UpperCamelCase )
        return hidden_states


class __UpperCamelCase ( nn.Module ):
    def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ):
        super().__init__()
        snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 )

    def a__ ( self :Dict ,_UpperCamelCase :List[str] ):
        if gate.device.type != "mps":
            return F.gelu(_UpperCamelCase )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )

    def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ):
        snake_case_ , snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 )
        return hidden_states * self.gelu(_UpperCamelCase )


class __UpperCamelCase ( nn.Module ):
    def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ):
        super().__init__()
        snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase )

    def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ):
        snake_case_ : int = self.proj(_UpperCamelCase )
        return x * torch.sigmoid(1.7_02 * x )


class __UpperCamelCase ( nn.Module ):
    def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ):
        super().__init__()
        snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase )
        snake_case_ : Union[str, Any] = nn.SiLU()
        snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 )
        snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )

    def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ):
        snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) )
        snake_case_ , snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 )
        snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift
        return x


class __UpperCamelCase ( nn.Module ):
    def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ):
        super().__init__()
        snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase )
        snake_case_ : int = nn.SiLU()
        snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase )
        snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 )

    def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ):
        snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) )
        snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = emb.chunk(6 ,dim=1 )
        snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class __UpperCamelCase ( nn.Module ):
    def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ):
        super().__init__()
        snake_case_ : Optional[int] = num_groups
        snake_case_ : List[Any] = eps

        if act_fn is None:
            snake_case_ : int = None
        else:
            snake_case_ : Dict = get_activation(_UpperCamelCase )

        snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 )

    def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ):
        if self.act:
            snake_case_ : Any = self.act(_UpperCamelCase )

        snake_case_ : Optional[int] = self.linear(_UpperCamelCase )
        snake_case_ : Dict = emb[:, :, None, None]
        snake_case_ , snake_case_ : str = emb.chunk(2 ,dim=1 )

        snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps )
        snake_case_ : List[str] = x * (1 + scale) + shift
        return x
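The `_chunk_size` branch in the transformer block above trades peak memory for extra kernel launches by running the feed-forward over slices of the sequence and re-concatenating. A self-contained sketch of that splitting logic in plain PyTorch (no diffusers imports; names are illustrative, not from the sample):

import torch

def chunked_feed_forward(ff, hidden_states, chunk_dim, chunk_size):
    # Same idea as above: apply `ff` to each slice, then concatenate along chunk_dim.
    if hidden_states.shape[chunk_dim] % chunk_size != 0:
        raise ValueError("sequence length must be divisible by chunk_size")
    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    return torch.cat(
        [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)],
        dim=chunk_dim,
    )

ff = torch.nn.Sequential(torch.nn.Linear(64, 256), torch.nn.GELU(), torch.nn.Linear(256, 64))
x = torch.randn(2, 128, 64)  # (batch, seq, dim)
# Chunked and unchunked paths compute the same result, since Linear acts per position.
assert torch.allclose(chunked_feed_forward(ff, x, 1, 32), ff(x), atol=1e-6)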
8
1
'''simple docstring'''

from __future__ import annotations


def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] ):
    '''simple docstring'''
    snake_case_ : List[str] = len(lowerCamelCase_ )
    # We need to create solution object to save path.
    snake_case_ : Dict = [[0 for _ in range(lowerCamelCase_ )] for _ in range(lowerCamelCase_ )]
    snake_case_ : List[str] = run_maze(lowerCamelCase_ , 0 , 0 , lowerCamelCase_ )
    if solved:
        print("""\n""".join(str(lowerCamelCase_ ) for row in solutions ) )
    else:
        print("""No solution exists!""" )
    return solved


def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :list[list[int]] ):
    '''simple docstring'''
    snake_case_ : Any = len(lowerCamelCase_ )
    # Final check point.
    if i == j == (size - 1):
        snake_case_ : List[str] = 1
        return True

    snake_case_ : Any = (not i < 0) and (not j < 0)  # Check lower bounds
    snake_case_ : Optional[Any] = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        snake_case_ : List[Any] = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            snake_case_ : int = 1

            # check for directions
            if (
                run_maze(lowerCamelCase_ , i + 1 , lowerCamelCase_ , lowerCamelCase_ )
                or run_maze(lowerCamelCase_ , lowerCamelCase_ , j + 1 , lowerCamelCase_ )
                or run_maze(lowerCamelCase_ , i - 1 , lowerCamelCase_ , lowerCamelCase_ )
                or run_maze(lowerCamelCase_ , lowerCamelCase_ , j - 1 , lowerCamelCase_ )
            ):
                return True

            snake_case_ : List[Any] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
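A small usage sketch of the backtracking solver above. The inner helper is called run_maze in the sample; the entry-point name is obfuscated, so solve_maze here is an assumption (0 = free cell, 1 = blocked):

maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
# Prints the 0/1 solution matrix marking a path from (0, 0) to (4, 4) and returns True.
solve_maze(maze)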
8
'''simple docstring'''

import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, TaTokenizer


def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ):
    '''simple docstring'''
    snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {}
    snake_case_ : Union[str, Any] = padding_side
    return tokenizer(
        [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )


def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ):
    '''simple docstring'''
    snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class __UpperCamelCase ( lowercase__ ):
    def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,):
        super().__init__()
        snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" )
        snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" )
        snake_case_ : Optional[int] = self.get_char_lens(self.src_file )
        snake_case_ : List[str] = max_source_length
        snake_case_ : str = max_target_length
        assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
        snake_case_ : str = tokenizer
        snake_case_ : str = prefix
        if n_obs is not None:
            snake_case_ : int = self.src_lens[:n_obs]
        snake_case_ : Tuple = src_lang
        snake_case_ : str = tgt_lang

    def __len__( self :Any ):
        return len(self.src_lens )

    def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ):
        snake_case_ : Optional[int] = index + 1  # linecache starts at 1
        snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" )
        snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" )
        assert source_line, F'''empty source line for index {index}'''
        assert tgt_line, F'''empty tgt line for index {index}'''

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,_UpperCamelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        snake_case_ : int = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer
        )
        snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer

        snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" )
        snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" )

        snake_case_ : int = source_inputs["""input_ids"""].squeeze()
        snake_case_ : str = target_inputs["""input_ids"""].squeeze()
        snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def a__ ( _UpperCamelCase :str ):
        return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()]

    def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ):
        snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] )
        snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
        snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        snake_case_ : Optional[Any] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,_UpperCamelCase )
            else self.tokenizer.pad_token_id
        )
        snake_case_ : Tuple = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,_UpperCamelCase )
            else self.tokenizer.pad_token_id
        )
        snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase )
        snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase )
        snake_case_ : Optional[int] = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch


__A : List[Any] = getLogger(__name__)


def UpperCAmelCase ( lowerCamelCase_ :List[List] ):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(lowerCamelCase_ ) )


def UpperCAmelCase ( lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : int = get_git_info()
    save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) )


def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ):
    '''simple docstring'''
    with open(lowerCamelCase_ , """w""" ) as f:
        json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
    '''simple docstring'''
    with open(lowerCamelCase_ ) as f:
        return json.load(lowerCamelCase_ )


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ )
    snake_case_ : List[str] = {
        """repo_id""": str(lowerCamelCase_ ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos


def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ):
    '''simple docstring'''
    return list(map(lowerCamelCase_ , lowerCamelCase_ ) )


def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ):
    '''simple docstring'''
    with open(lowerCamelCase_ , """wb""" ) as f:
        return pickle.dump(lowerCamelCase_ , lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :Dict ):
    '''simple docstring'''

    def remove_articles(lowerCamelCase_ :str ):
        return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ )

    def white_space_fix(lowerCamelCase_ :Optional[Any] ):
        return " ".join(text.split() )

    def remove_punc(lowerCamelCase_ :Tuple ):
        snake_case_ : Union[str, Any] = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(lowerCamelCase_ :Optional[Any] ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) )


def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ):
    '''simple docstring'''
    snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split()
    snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split()
    snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ )
    snake_case_ : Optional[Any] = sum(common.values() )
    if num_same == 0:
        return 0
    snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ )
    snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ )
    snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall)
    return fa


def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ):
    '''simple docstring'''
    return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ):
    '''simple docstring'''
    assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
    snake_case_ : Optional[int] = 0
    for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ):
        em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ )
    if len(lowerCamelCase_ ) > 0:
        em /= len(lowerCamelCase_ )
    return {"em": em}


def UpperCAmelCase ( lowerCamelCase_ :Any ):
    '''simple docstring'''
    return model_prefix.startswith("""rag""" )


def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ):
    '''simple docstring'''
    snake_case_ : List[str] = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    snake_case_ : Optional[int] = """dropout_rate"""
    for p in extra_params:
        if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
            if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) )
                delattr(lowerCamelCase_ , lowerCamelCase_ )
                continue
            snake_case_ : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p]
            setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
            delattr(lowerCamelCase_ , lowerCamelCase_ )
    return hparams, config
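The last few helpers above implement SQuAD-style answer scoring: normalize, then a bag-of-tokens F1 and exact match. A standalone re-implementation sketch of the F1 arithmetic (normalization omitted for brevity; names are illustrative):

from collections import Counter

def token_f1(prediction: str, gold: str) -> float:
    # Same bag-of-tokens overlap as the helper above.
    pred_tokens, gold_tokens = prediction.split(), gold.split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

print(token_f1("the cat sat", "a cat sat down"))  # 2 shared tokens -> F1 = 4/7 ≈ 0.571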
8
1
'''simple docstring'''

def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ):
    '''simple docstring'''
    if height >= 1:
        move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        move_disk(lowerCamelCase_ , lowerCamelCase_ )
        move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any] ):
    '''simple docstring'''
    print("""moving disk from""" , lowerCamelCase_ , """to""" , lowerCamelCase_ )


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : str = int(input("""Height of hanoi: """ ).strip() )
    move_tower(lowerCamelCase_ , """A""" , """B""" , """C""" )


if __name__ == "__main__":
    main()
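The move_tower recursion above performs 2**n - 1 disk moves for an n-disk tower: each call moves two (n-1)-disk sub-towers plus one disk. A minimal standalone sketch counting moves instead of printing them:

def count_moves(height: int) -> int:
    # Mirrors the recursion above: two sub-towers plus one disk move.
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1

assert count_moves(3) == 7
assert count_moves(10) == 2**10 - 1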
8
'''simple docstring'''

import functools


def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : List[str] = len(lowerCamelCase_ )
    snake_case_ : Dict = len(lowerCamelCase_ )

    @functools.cache
    def min_distance(lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_worda - indexa
        # if second word index is overflow - delete all from the first word
        if indexa >= len_worda:
            return len_worda - indexa
        snake_case_ : Union[str, Any] = int(worda[indexa] != worda[indexa] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , lowerCamelCase_ ) ,
            1 + min_distance(lowerCamelCase_ , indexa + 1 ) ,
            diff + min_distance(indexa + 1 , indexa + 1 ) ,
        )

    return min_distance(0 , 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
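The function above computes the Levenshtein edit distance by memoized recursion over index pairs (functools.cache). An equivalent standalone sketch checked against a classic pair:

import functools

@functools.cache
def levenshtein(a: str, b: str) -> int:
    # Recurse on suffixes instead of indices; same three choices as above.
    if not a:
        return len(b)
    if not b:
        return len(a)
    diff = int(a[0] != b[0])
    return min(
        1 + levenshtein(a[1:], b),       # delete from first word
        1 + levenshtein(a, b[1:]),       # insert into first word
        diff + levenshtein(a[1:], b[1:]) # match or substitute
    )

assert levenshtein("kitten", "sitting") == 3  # k->s, e->i, insert g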
8
1
'''simple docstring'''

import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    __A : List[str] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    __A : Tuple = [0, 25, 50]
    __A : Any = [25, 50, 75]
    __A : List[Any] = fuzz.membership.trimf(X, abca)
    __A : Optional[Any] = fuzz.membership.trimf(X, abca)

    # Compute the different operations using inbuilt functions.
    __A : Tuple = np.ones(75)
    __A : Dict = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    __A : List[str] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    __A : List[str] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    __A : Union[str, Any] = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    __A : Tuple = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    __A : int = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    __A : List[Any] = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    __A : str = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    __A : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
8
'''simple docstring'''

import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : Any = tmp_path / """file.csv"""
    snake_case_ : Any = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(lowerCamelCase_ , """w""" ) as f:
        f.write(lowerCamelCase_ )
    return str(lowerCamelCase_ )


@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : Optional[int] = tmp_path / """malformed_file.csv"""
    snake_case_ : int = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(lowerCamelCase_ , """w""" ) as f:
        f.write(lowerCamelCase_ )
    return str(lowerCamelCase_ )


@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ):
    '''simple docstring'''
    snake_case_ : str = tmp_path / """csv_with_image.csv"""
    snake_case_ : int = textwrap.dedent(
        F'''\
        image
        {image_file}
        ''' )
    with open(lowerCamelCase_ , """w""" ) as f:
        f.write(lowerCamelCase_ )
    return str(lowerCamelCase_ )


@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Any ):
    '''simple docstring'''
    snake_case_ : int = tmp_path / """csv_with_label.csv"""
    snake_case_ : Tuple = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(lowerCamelCase_ , """w""" ) as f:
        f.write(lowerCamelCase_ )
    return str(lowerCamelCase_ )


@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
    '''simple docstring'''
    snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv"""
    snake_case_ : str = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(lowerCamelCase_ , """w""" ) as f:
        f.write(lowerCamelCase_ )
    return str(lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ):
    '''simple docstring'''
    snake_case_ : int = Csv()
    snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == """ERROR"""
        and """Failed to read file""" in record.message
        and os.path.basename(lowerCamelCase_ ) in record.message
        for record in caplog.records )


@require_pil
def UpperCAmelCase ( lowerCamelCase_ :Tuple ):
    '''simple docstring'''
    with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
        snake_case_ : Tuple = f.read().splitlines()[1]
    snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
    snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] )
    snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""image""" ).type == Image()()
    snake_case_ : List[str] = pa_table.to_pydict()["""image"""]
    assert generated_content == [{"path": image_file, "bytes": None}]


def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
        snake_case_ : List[Any] = f.read().splitlines()[1:]
    snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
    snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
    snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
    snake_case_ : Union[str, Any] = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(lowerCamelCase_ ) for label in labels]


def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
    '''simple docstring'''
    snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda lowerCamelCase_ : [int(lowerCamelCase_ ) for i in x.split()]} )
    snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] )
    snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
    snake_case_ : Dict = pa_table.to_pydict()["""int_list"""]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
8
1
'''simple docstring'''

import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


__A : str = logging.getLogger(__name__)


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : List[Any] = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
    parser.add_argument(
        """--dataset_name""" , type=lowerCamelCase_ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
    parser.add_argument(
        """--dataset_config""" , type=lowerCamelCase_ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
    parser.add_argument(
        """--tokenizer_name_or_path""" , type=lowerCamelCase_ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
    parser.add_argument(
        """--shard_size""" , type=lowerCamelCase_ , default=10_00 , help="""Number of entries to go in a single shard.""" , )
    parser.add_argument("""--split""" , type=lowerCamelCase_ , default="""train""" , choices=["""train""", """test""", """validation"""] )
    parser.add_argument(
        """--limit""" , default=lowerCamelCase_ , type=lowerCamelCase_ , help="""Limit the number of shards (used for debugging).""" , )
    parser.add_argument(
        """--max_length""" , type=lowerCamelCase_ , default=5_12 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""" , )
    parser.add_argument(
        """--output_dir""" , default="""tf-tpu""" , type=lowerCamelCase_ , help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""" , )
    snake_case_ : Optional[int] = parser.parse_args()
    return args


def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ):
    '''simple docstring'''

    def fn(lowerCamelCase_ :Union[str, Any] ):
        return tokenizer(examples["""text"""] )

    return fn


def UpperCAmelCase ( lowerCamelCase_ :Dict ):
    '''simple docstring'''
    snake_case_ : Optional[Any] = []
    for i in range(len(tokenized_data["""input_ids"""] ) ):
        snake_case_ : List[Any] = {
            """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
        }
        snake_case_ : Tuple = tf.train.Features(feature=lowerCamelCase_ )
        snake_case_ : Union[str, Any] = tf.train.Example(features=lowerCamelCase_ )
        snake_case_ : List[Any] = example.SerializeToString()
        records.append(lowerCamelCase_ )
    return records


def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    snake_case_ : List[str] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )

    if args.limit is not None:
        snake_case_ : List[str] = min(len(lowerCamelCase_ ) , args.limit )
        snake_case_ : Optional[Any] = dataset.select(range(lowerCamelCase_ ) )
        print(F'''Limiting the dataset to {args.limit} entries.''' )

    snake_case_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        snake_case_ : int = os.path.join(args.output_dir , args.split )
        if not os.path.exists(lowerCamelCase_ ):
            os.makedirs(lowerCamelCase_ )
    else:
        snake_case_ : List[str] = os.path.join(args.output_dir , args.split )

    # Tokenize the whole dataset at once.
    snake_case_ : List[str] = tokenize_function(lowerCamelCase_ )
    snake_case_ : Optional[int] = dataset.map(lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=4 , remove_columns=["""text"""] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(lowerCamelCase_ :Tuple ):
        # Concatenate all texts.
        snake_case_ : Any = {k: sum(examples[k] , [] ) for k in examples.keys()}
        snake_case_ : Tuple = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        snake_case_ : str = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        snake_case_ : Tuple = {
            k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase_ , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    snake_case_ : List[str] = dataset_tokenized.map(lowerCamelCase_ , batched=lowerCamelCase_ , batch_size=10_00 , num_proc=4 )

    snake_case_ : Optional[Any] = 0
    snake_case_ : str = 0
    for shard in range(0 , len(lowerCamelCase_ ) , args.shard_size ):
        snake_case_ : List[Any] = grouped_dataset[shard : shard + args.shard_size]
        snake_case_ : Dict = len(dataset_snapshot["""input_ids"""] )
        snake_case_ : int = os.path.join(lowerCamelCase_ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        snake_case_ : List[Any] = get_serialized_examples(lowerCamelCase_ )

        with tf.io.TFRecordWriter(lowerCamelCase_ ) as out_file:
            for i in range(len(lowerCamelCase_ ) ):
                snake_case_ : Dict = serialized_examples[i]
                out_file.write(lowerCamelCase_ )
            print("""Wrote file {} containing {} records""".format(lowerCamelCase_ , lowerCamelCase_ ) )

        shard_count += 1
        total_records += records_containing

    with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f:
        print(F'''Total {args.split} records: {total_records}''' , file=lowerCamelCase_ )


if __name__ == "__main__":
    __A : List[Any] = parse_args()
    main(args)
8
'''simple docstring'''

import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ):
    '''simple docstring'''
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
    snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
        snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ):
    '''simple docstring'''
    # set torch weights for 1-to-1 comparison
    snake_case_ : Optional[Any] = np.asarray(weights[0] )
    snake_case_ : int = np.asarray(weights[1] )
    snake_case_ : Any = np.asarray(weights[2] )

    set_param(
        torch_layer.self_attention.query_key ,
        torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) ,
    )
    set_param(
        torch_layer.self_attention.value ,
        torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) ,
    )
    set_param(
        torch_layer.output.dense ,
        torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) ,
    )


def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ):
    '''simple docstring'''
    # set torch weights for 1-to-1 comparison
    snake_case_ : List[Any] = np.asarray(weights[0] )
    snake_case_ : Optional[int] = np.asarray(weights[1] )
    snake_case_ : Union[str, Any] = np.asarray(weights[2] )
    snake_case_ : int = np.asarray(weights[3] )

    set_param(
        torch_layer.self_attention.query ,
        torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) ,
    )
    set_param(
        torch_layer.self_attention.key ,
        torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) ,
    )
    set_param(
        torch_layer.self_attention.value ,
        torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) ,
    )
    set_param(
        torch_layer.output.dense ,
        torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) ,
    )


def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ):
    '''simple docstring'''
    # layernorm 1
    snake_case_ : str = weights[0][0][0]
    snake_case_ : int = np.asarray(layer_norm_a[0] )
    snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm ,
        torch.tensor(lowerCamelCase_ ) ,
        torch.tensor(lowerCamelCase_ ) ,
    )

    # lsh weights + output
    snake_case_ : Tuple = weights[0][1]
    if len(lowerCamelCase_ ) < 4:
        set_layer_weights_in_torch_lsh(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ )
    else:
        set_layer_weights_in_torch_local(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ )

    # intermediate weighs
    snake_case_ : str = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(lowerCamelCase_ ) == 4:
        snake_case_ : List[Any] = intermediate_weights[2]

    # layernorm 2
    snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] )
    snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm ,
        torch.tensor(lowerCamelCase_ ) ,
        torch.tensor(lowerCamelCase_ ) ,
    )

    # intermediate dense
    snake_case_ : Any = np.asarray(intermediate_weights[1][0] )
    snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense ,
        torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() ,
        torch.tensor(lowerCamelCase_ ) ,
    )

    # intermediate out
    snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] )
    snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense ,
        torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() ,
        torch.tensor(lowerCamelCase_ ) ,
    )


def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ):
    '''simple docstring'''
    # reformer model
    snake_case_ : Dict = torch_model.reformer

    # word embeds
    snake_case_ : List[Any] = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings ,
        torch.tensor(lowerCamelCase_ ) ,
    )

    if isinstance(weights[3] , lowerCamelCase_ ):
        snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F'''{position_embeddings[emb_idx]} emb does not match'''
            snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) )

    snake_case_ : List[Any] = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        lowerCamelCase_ ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )

    # output layer norm
    snake_case_ : Optional[Any] = np.asarray(weights[7][0] )
    snake_case_ : List[Any] = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm ,
        torch.tensor(lowerCamelCase_ ) ,
        torch.tensor(lowerCamelCase_ ) ,
    )

    # output embeddings
    snake_case_ : Optional[int] = np.asarray(weights[9][0] )
    snake_case_ : Any = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder ,
        torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() ,
        torch.tensor(lowerCamelCase_ ) ,
    )


def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ):
    '''simple docstring'''
    # Initialise PyTorch model
    snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ )
    print(F'''Building PyTorch model from configuration: {config}''' )
    snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ )

    with open(lowerCamelCase_ , """rb""" ) as f:
        snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""]

    set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , lowerCamelCase_ )


if __name__ == "__main__":
    __A : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    __A : List[Any] = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
8
1
'''simple docstring'''

import re


def UpperCAmelCase ( lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : List[Any] = re.compile(
        R"""^(?:0|94|\+94|0{2}94)"""
        R"""7(0|1|2|4|5|6|7|8)"""
        R"""(-| |)"""
        R"""\d{7}$""" )
    return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) )


if __name__ == "__main__":
    __A : int = '0094702343221'
    print(is_sri_lankan_phone_number(phone))
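A quick check of the pattern above, using the name its own __main__ block calls (is_sri_lankan_phone_number). The pattern accepts prefixes 0, 94, +94, or 0094, then a 7 and a carrier digit, an optional separator, and seven digits:

for number in ("0094702343221", "+94773283048", "0718382399", "0912343221"):
    print(number, is_sri_lankan_phone_number(number))
# The last one is rejected: after the leading 0, the pattern requires a 7 next, not a 9.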
8
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__A : List[Any] = logging.get_logger(__name__)

__A : str = {
    'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class __UpperCamelCase ( lowercase__ ):
    lowercase : List[Any] = 'canine'

    def __init__( self :Optional[int] ,_UpperCamelCase :Dict=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :int=1_2 ,_UpperCamelCase :int=3_0_7_2 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=1_6_3_8_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :List[str]=0.02 ,_UpperCamelCase :Any=1E-1_2 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :List[str]=0xE_0_0_0 ,_UpperCamelCase :Optional[Any]=0xE_0_0_1 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :str=8 ,_UpperCamelCase :int=1_6_3_8_4 ,_UpperCamelCase :int=1_2_8 ,**_UpperCamelCase :str ,):
        super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase )
        snake_case_ : List[str] = max_position_embeddings
        snake_case_ : Union[str, Any] = hidden_size
        snake_case_ : Dict = num_hidden_layers
        snake_case_ : Optional[int] = num_attention_heads
        snake_case_ : Tuple = intermediate_size
        snake_case_ : str = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : Dict = attention_probs_dropout_prob
        snake_case_ : Optional[Any] = initializer_range
        snake_case_ : Optional[int] = type_vocab_size
        snake_case_ : List[str] = layer_norm_eps

        # Character config:
        snake_case_ : Any = downsampling_rate
        snake_case_ : List[str] = upsampling_kernel_size
        snake_case_ : int = num_hash_functions
        snake_case_ : Tuple = num_hash_buckets
        snake_case_ : Tuple = local_transformer_stride
8
1
'''simple docstring''' from sklearn.metrics import fa_score import datasets __A : Tuple = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' __A : Tuple = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' __A : str = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def a__ ( self :Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,) def a__ ( self :int ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :Dict=None ,_UpperCamelCase :Union[str, Any]=1 ,_UpperCamelCase :Any="binary" ,_UpperCamelCase :Dict=None ): snake_case_ : int = fa_score( _UpperCamelCase ,_UpperCamelCase ,labels=_UpperCamelCase ,pos_label=_UpperCamelCase ,average=_UpperCamelCase ,sample_weight=_UpperCamelCase ) return {"f1": float(_UpperCamelCase ) if score.size == 1 else score}
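# Hand-check of Example 1 from the docstring above. Note that scikit-learn's
# public name for this function is ``f1_score`` (the import at the top of this
# file looks garbled): with references [0, 1, 0, 1, 0] and predictions
# [0, 0, 1, 1, 0] there is one TP, one FP and one FN, so precision = recall = 0.5
# and F1 = 2 * (0.5 * 0.5) / (0.5 + 0.5) = 0.5.
from sklearn.metrics import f1_score

references = [0, 1, 0, 1, 0]
predictions = [0, 0, 1, 1, 0]
print(f1_score(references, predictions))  # 0.5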
8
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Optional[Any] = { 'facebook/blenderbot_small-90M': 512, } class __UpperCamelCase ( lowercase__ ): lowercase : str = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = BlenderbotSmallTokenizer def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,): super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = add_prefix_space def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ): snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : int = [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
8
1
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
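# Worked example of the derived field set in the constructor above:
# num_hidden_layers = sum(num_block_repeats) * 4, so the default block
# repeats [1, 2, 2, 3, 3, 4, 1] (16 blocks) yield 64 reported hidden layers.
num_block_repeats = [1, 2, 2, 3, 3, 4, 1]
assert sum(num_block_repeats) * 4 == 64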
8
'''simple docstring'''


def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the out-of-order neighbours, then step back one position.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
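# Deterministic sanity checks for gnome_sort above (the __main__ block reads
# from stdin): the sort mutates its argument in place, returns the same list
# object, and runs in O(n^2) time in the worst case.
data = [5, 1, 4, 2, 8]
assert gnome_sort(data) == [1, 2, 4, 5, 8]
assert data == [1, 2, 4, 5, 8]  # sorted in place
assert gnome_sort([]) == [] and gnome_sort([7]) == [7]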
8
1
'''simple docstring'''
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode a lowercase string with the A1Z26 cipher (a=1, ..., z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of 1-26 alphabet positions back into lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
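# Round-trip checks for the A1Z26 cipher above; encode maps lowercase letters
# to their alphabet positions and decode inverts it exactly.
assert encode("abc") == [1, 2, 3]
assert decode([8, 5, 12, 12, 15]) == "hello"
assert decode(encode("zebra")) == "zebra"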
8
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape 
,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
8
1
'''simple docstring''' import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : List[str] = AudioLDMPipeline lowercase : int = TEXT_TO_AUDIO_PARAMS lowercase : Any = TEXT_TO_AUDIO_BATCH_PARAMS lowercase : List[Any] = frozenset( [ 'num_inference_steps', 'num_waveforms_per_prompt', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) def a__ ( self :Optional[Any] ): torch.manual_seed(0 ) snake_case_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=(3_2, 6_4) ,class_embed_type="""simple_projection""" ,projection_class_embeddings_input_dim=3_2 ,class_embeddings_concat=_UpperCamelCase ,) snake_case_ : Tuple = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="""scaled_linear""" ,clip_sample=_UpperCamelCase ,set_alpha_to_one=_UpperCamelCase ,) torch.manual_seed(0 ) snake_case_ : List[str] = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=1 ,out_channels=1 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) torch.manual_seed(0 ) snake_case_ : Optional[Any] = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,projection_dim=3_2 ,) snake_case_ : str = ClapTextModelWithProjection(_UpperCamelCase ) snake_case_ : Optional[Any] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" ,model_max_length=7_7 ) snake_case_ : str = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6_0_0_0 ,upsample_initial_channel=1_6 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_UpperCamelCase ,) snake_case_ : int = SpeechTaHifiGan(_UpperCamelCase ) snake_case_ : Tuple = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """vocoder""": vocoder, } return components def a__ ( self :str ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Optional[int]=0 ): if str(_UpperCamelCase ).startswith("""mps""" ): snake_case_ : Tuple = torch.manual_seed(_UpperCamelCase ) else: snake_case_ : Optional[Any] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ : Union[str, Any] = { """prompt""": """A hammer hitting a wooden surface""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, } return inputs def a__ ( self :List[Any] ): snake_case_ : Union[str, Any] = """cpu""" # ensure 
determinism for the device-dependent torch.Generator snake_case_ : str = self.get_dummy_components() snake_case_ : Any = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ : List[str] = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : List[Any] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : int = audioldm_pipe(**_UpperCamelCase ) snake_case_ : List[Any] = output.audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) == 2_5_6 snake_case_ : Any = audio[:1_0] snake_case_ : Union[str, Any] = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def a__ ( self :List[Any] ): snake_case_ : Tuple = self.get_dummy_components() snake_case_ : Optional[Any] = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ : Optional[int] = audioldm_pipe.to(_UpperCamelCase ) snake_case_ : int = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Optional[Any] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Any = 3 * [inputs["""prompt"""]] # forward snake_case_ : Dict = audioldm_pipe(**_UpperCamelCase ) snake_case_ : Dict = output.audios[0] snake_case_ : Dict = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : List[Any] = 3 * [inputs.pop("""prompt""" )] snake_case_ : Optional[Any] = audioldm_pipe.tokenizer( _UpperCamelCase ,padding="""max_length""" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_UpperCamelCase ,return_tensors="""pt""" ,) snake_case_ : Optional[int] = text_inputs["""input_ids"""].to(_UpperCamelCase ) snake_case_ : Tuple = audioldm_pipe.text_encoder( _UpperCamelCase ,) snake_case_ : Optional[Any] = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state snake_case_ : Optional[Any] = F.normalize(_UpperCamelCase ,dim=-1 ) snake_case_ : List[str] = prompt_embeds # forward snake_case_ : str = audioldm_pipe(**_UpperCamelCase ) snake_case_ : Optional[int] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def a__ ( self :List[str] ): snake_case_ : str = self.get_dummy_components() snake_case_ : Any = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ : Tuple = audioldm_pipe.to(_UpperCamelCase ) snake_case_ : Optional[Any] = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : int = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Any = 3 * ["""this is a negative prompt"""] snake_case_ : int = negative_prompt snake_case_ : str = 3 * [inputs["""prompt"""]] # forward snake_case_ : Any = audioldm_pipe(**_UpperCamelCase ) snake_case_ : int = output.audios[0] snake_case_ : Any = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Optional[int] = 3 * [inputs.pop("""prompt""" )] snake_case_ : str = [] for p in [prompt, negative_prompt]: snake_case_ : Union[str, Any] = audioldm_pipe.tokenizer( _UpperCamelCase ,padding="""max_length""" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_UpperCamelCase ,return_tensors="""pt""" ,) snake_case_ : List[Any] = text_inputs["""input_ids"""].to(_UpperCamelCase ) snake_case_ : Tuple = audioldm_pipe.text_encoder( _UpperCamelCase ,) snake_case_ : List[str] = text_embeds.text_embeds # additional L_2 normalization over each hidden-state snake_case_ : List[str] = F.normalize(_UpperCamelCase ,dim=-1 ) embeds.append(_UpperCamelCase ) snake_case_ , snake_case_ : Optional[Any] = embeds # forward 
snake_case_ : Tuple = audioldm_pipe(**_UpperCamelCase ) snake_case_ : Dict = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def a__ ( self :Optional[int] ): snake_case_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : int = self.get_dummy_components() snake_case_ : List[str] = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) snake_case_ : Optional[Any] = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ : Tuple = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Optional[int] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Optional[Any] = """egg cracking""" snake_case_ : Union[str, Any] = audioldm_pipe(**_UpperCamelCase ,negative_prompt=_UpperCamelCase ) snake_case_ : Dict = output.audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) == 2_5_6 snake_case_ : Union[str, Any] = audio[:1_0] snake_case_ : Optional[int] = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def a__ ( self :Tuple ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : int = self.get_dummy_components() snake_case_ : Optional[Any] = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) snake_case_ : Optional[int] = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ : Tuple = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Tuple = """A hammer hitting a wooden surface""" # test num_waveforms_per_prompt=1 (default) snake_case_ : Dict = audioldm_pipe(_UpperCamelCase ,num_inference_steps=2 ).audios assert audios.shape == (1, 2_5_6) # test num_waveforms_per_prompt=1 (default) for batch of prompts snake_case_ : Tuple = 2 snake_case_ : Tuple = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_5_6) # test num_waveforms_per_prompt for single prompt snake_case_ : Dict = 2 snake_case_ : Dict = audioldm_pipe(_UpperCamelCase ,num_inference_steps=2 ,num_waveforms_per_prompt=_UpperCamelCase ).audios assert audios.shape == (num_waveforms_per_prompt, 2_5_6) # test num_waveforms_per_prompt for batch of prompts snake_case_ : Dict = 2 snake_case_ : Optional[Any] = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_UpperCamelCase ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6) def a__ ( self :str ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Optional[int] = self.get_dummy_components() snake_case_ : List[str] = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ : Tuple = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Union[str, Any] = audioldm_pipe.vocoder.config.sampling_rate snake_case_ : Any = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Optional[Any] = audioldm_pipe(audio_length_in_s=0.0_16 ,**_UpperCamelCase ) snake_case_ : Any = output.audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) / vocoder_sampling_rate == 0.0_16 snake_case_ : Dict = audioldm_pipe(audio_length_in_s=0.0_32 ,**_UpperCamelCase ) snake_case_ : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) / vocoder_sampling_rate == 0.0_32 def a__ ( self :List[Any] ): snake_case_ : Union[str, 
Any] = self.get_dummy_components() snake_case_ : Optional[Any] = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ : Union[str, Any] = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : int = ["""hey"""] snake_case_ : Optional[int] = audioldm_pipe(_UpperCamelCase ,num_inference_steps=1 ) snake_case_ : List[Any] = output.audios.shape assert audio_shape == (1, 2_5_6) snake_case_ : Tuple = audioldm_pipe.vocoder.config config.model_in_dim *= 2 snake_case_ : Optional[Any] = SpeechTaHifiGan(_UpperCamelCase ).to(_UpperCamelCase ) snake_case_ : Dict = audioldm_pipe(_UpperCamelCase ,num_inference_steps=1 ) snake_case_ : Union[str, Any] = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_5_6) def a__ ( self :Any ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_UpperCamelCase ) def a__ ( self :List[str] ): self._test_inference_batch_single_identical(test_mean_pixel_difference=_UpperCamelCase ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def a__ ( self :List[Any] ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCamelCase ) @slow class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :Tuple ): super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self :str ,_UpperCamelCase :List[str] ,_UpperCamelCase :List[Any]="cpu" ,_UpperCamelCase :Dict=torch.floataa ,_UpperCamelCase :Any=0 ): snake_case_ : str = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ : List[Any] = np.random.RandomState(_UpperCamelCase ).standard_normal((1, 8, 1_2_8, 1_6) ) snake_case_ : int = torch.from_numpy(_UpperCamelCase ).to(device=_UpperCamelCase ,dtype=_UpperCamelCase ) snake_case_ : Dict = { """prompt""": """A hammer hitting a wooden surface""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 2.5, } return inputs def a__ ( self :int ): snake_case_ : Any = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" ) snake_case_ : List[Any] = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : List[str] = self.get_inputs(_UpperCamelCase ) snake_case_ : List[Any] = 2_5 snake_case_ : str = audioldm_pipe(**_UpperCamelCase ).audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) == 8_1_9_2_0 snake_case_ : str = audio[7_7_2_3_0:7_7_2_4_0] snake_case_ : Optional[int] = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) snake_case_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def a__ ( self :Any ): snake_case_ : Union[str, Any] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" ) snake_case_ : Tuple = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) snake_case_ : Optional[int] = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Optional[int] = self.get_inputs(_UpperCamelCase ) snake_case_ : Tuple = audioldm_pipe(**_UpperCamelCase ).audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) == 8_1_9_2_0 snake_case_ : List[str] = audio[2_7_7_8_0:2_7_7_9_0] snake_case_ : List[str] = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 
0.28_86, 0.32_97, 0.22_12] ) snake_case_ : List[Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
8
'''simple docstring'''
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Insertion-sort the first ``n`` elements of ``collection`` in place, recursively."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Move ``collection[index - 1]`` rightwards until the adjacent pair is ordered."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
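# Deterministic check for the recursive sort above; the function returns None
# and only mutates its argument.
values = [4, 1, 3, 2]
rec_insertion_sort(values, len(values))
assert values == [1, 2, 3, 4]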
8
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
8
1
'''simple docstring''' # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : Union[str, Any] = { """en""": """Machine learning is great, isn't it?""", """ru""": """Машинное обучение - это здорово, не так ли?""", """de""": """Maschinelles Lernen ist großartig, oder?""", } # BLUE scores as follows: # "pair": [fairseq, transformers] snake_case_ : List[str] = { """ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""], """en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""], """en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""], """de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""], } snake_case_ : List[Any] = F'''{src_lang}-{tgt_lang}''' snake_case_ : List[str] = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) snake_case_ : Any = os.path.join(lowerCamelCase_ , """README.md""" ) print(F'''Generating {path}''' ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(lowerCamelCase_ ) # make sure we are under the root of the project __A : List[str] = Path(__file__).resolve().parent.parent.parent __A : Any = repo_dir / 'model_cards' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __A, __A, __A : Any = model_name.split('-') __A : Any = model_cards_dir / 'facebook' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
8
'''simple docstring'''
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if ``phone`` is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # prefix: 0, 94, +94 or 0094
        r"7(0|1|2|4|5|6|7|8)"  # operator digit after the leading 7
        r"(-| |)"  # optional separator
        r"\d{7}$"  # seven-digit subscriber number
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
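# Spot checks for the validator above: a match needs a recognised country
# prefix, an accepted operator digit after the leading 7, an optional
# separator, and exactly seven more digits.
assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0094702343221")
assert not is_sri_lankan_phone_number("0797363")  # disallowed second digit and too short
assert not is_sri_lankan_phone_number("+94732126789")  # 73 is not in the pattern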
8
1
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class __UpperCamelCase ( lowercase__ ): lowercase : List[str] = 'Wav2Vec2FeatureExtractor' lowercase : Union[str, Any] = 'AutoTokenizer' def __init__( self :Optional[Any] ,_UpperCamelCase :Dict ,_UpperCamelCase :List[Any] ): super().__init__(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : List[Any] = self.feature_extractor snake_case_ : Dict = False @classmethod def a__ ( cls :Optional[int] ,_UpperCamelCase :str ,**_UpperCamelCase :List[str] ): try: return super().from_pretrained(_UpperCamelCase ,**_UpperCamelCase ) except OSError: warnings.warn( F'''Loading a tokenizer inside {cls.__name__} from a config that does not''' """ include a `tokenizer_class` attribute is deprecated and will be """ """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`""" """ attribute to either your `config.json` or `tokenizer_config.json` """ """file to suppress this warning: """ ,_UpperCamelCase ,) snake_case_ : Dict = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = WavaVecaCTCTokenizer.from_pretrained(_UpperCamelCase ,**_UpperCamelCase ) return cls(feature_extractor=_UpperCamelCase ,tokenizer=_UpperCamelCase ) def __call__( self :Optional[Any] ,*_UpperCamelCase :Any ,**_UpperCamelCase :Tuple ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_UpperCamelCase ,**_UpperCamelCase ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" ) snake_case_ : Optional[Any] = kwargs.pop("""raw_speech""" ) else: snake_case_ : Any = kwargs.pop("""audio""" ,_UpperCamelCase ) snake_case_ : str = kwargs.pop("""sampling_rate""" ,_UpperCamelCase ) snake_case_ : Optional[Any] = kwargs.pop("""text""" ,_UpperCamelCase ) if len(_UpperCamelCase ) > 0: snake_case_ : Tuple = args[0] snake_case_ : int = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: snake_case_ : Optional[Any] = self.feature_extractor(_UpperCamelCase ,*_UpperCamelCase ,sampling_rate=_UpperCamelCase ,**_UpperCamelCase ) if text is not None: snake_case_ : Any = self.tokenizer(_UpperCamelCase ,**_UpperCamelCase ) if text is None: return inputs elif audio is None: return encodings else: snake_case_ : str = encodings["""input_ids"""] return inputs def a__ ( self :Dict ,*_UpperCamelCase :Optional[int] ,**_UpperCamelCase :List[Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Any = kwargs.pop("""input_features""" ,_UpperCamelCase ) snake_case_ : List[Any] = kwargs.pop("""labels""" ,_UpperCamelCase ) if len(_UpperCamelCase ) > 0: snake_case_ : Any = args[0] snake_case_ : List[str] = args[1:] if input_features is not None: snake_case_ : int = self.feature_extractor.pad(_UpperCamelCase ,*_UpperCamelCase ,**_UpperCamelCase ) if labels is not None: snake_case_ : Optional[int] = self.tokenizer.pad(_UpperCamelCase ,**_UpperCamelCase ) if labels is None: return input_features elif input_features is None: return labels else: snake_case_ : List[Any] = labels["""input_ids"""] return input_features def a__ ( self :List[Any] ,*_UpperCamelCase :Any 
,**_UpperCamelCase :int ): return self.tokenizer.batch_decode(*_UpperCamelCase ,**_UpperCamelCase ) def a__ ( self :Optional[Any] ,*_UpperCamelCase :Union[str, Any] ,**_UpperCamelCase :Optional[int] ): return self.tokenizer.decode(*_UpperCamelCase ,**_UpperCamelCase ) @contextmanager def a__ ( self :List[str] ): warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) snake_case_ : str = True snake_case_ : Optional[int] = self.tokenizer yield snake_case_ : Any = self.feature_extractor snake_case_ : str = False
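# Minimal usage sketch of the audio/text routing implemented by __call__ and
# pad above. Assumptions: the class is the one transformers exposes as
# ``Wav2Vec2Processor`` and the public checkpoint below is reachable.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz

inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")  # feature-extractor path
labels = processor(text="HELLO WORLD").input_ids  # tokenizer path
print(inputs.input_values.shape, len(labels))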
8
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline: the generated images and
    the per-image NSFW flags (``None`` if the safety checker was not run)."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
8
1
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __A : List[str] = { # 1536-bit 5: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AACAA68FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199' + 'FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( 
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08' + '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B' + '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9' + 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6' + '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8' + 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C' + '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718' + '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D' + '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D' + 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226' + '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC' + 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26' + '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB' + '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2' + '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127' + 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406' + 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918' + 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151' + '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03' + 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F' + 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B' + 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632' + '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E' + '6DCC4024FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD' + 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831' + '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B' + 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF' + '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6' + 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3' + '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328' + '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C' + 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE' + '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4' + '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300' + '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568' + 
'3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9' + '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B' + '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A' + '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36' + '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1' + 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92' + '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47' + '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71' + '60C980DD98EDD3DFFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, } class __UpperCamelCase : def __init__( self :List[Any] ,_UpperCamelCase :int = 1_4 ): if group not in primes: raise ValueError("""Unsupported Group""" ) snake_case_ : Optional[int] = primes[group]["""prime"""] snake_case_ : Union[str, Any] = primes[group]["""generator"""] snake_case_ : str = int(hexlify(urandom(3_2 ) ) ,base=1_6 ) def a__ ( self :Dict ): return hex(self.__private_key )[2:] def a__ ( self :Dict ): snake_case_ : Union[str, Any] = pow(self.generator ,self.__private_key ,self.prime ) return hex(_UpperCamelCase )[2:] def a__ ( self :Any ,_UpperCamelCase :int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(_UpperCamelCase ,(self.prime - 1) // 2 ,self.prime ) == 1 ) def a__ ( self :Optional[Any] ,_UpperCamelCase :str ): snake_case_ : Union[str, Any] = int(_UpperCamelCase ,base=1_6 ) if not self.is_valid_public_key(_UpperCamelCase ): raise ValueError("""Invalid public key""" ) snake_case_ : str = pow(_UpperCamelCase ,self.__private_key ,self.prime ) return shaaaa(str(_UpperCamelCase ).encode() ).hexdigest() @staticmethod def a__ ( _UpperCamelCase :int ,_UpperCamelCase :int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(_UpperCamelCase ,(prime - 1) // 2 ,_UpperCamelCase ) == 1 ) @staticmethod def a__ ( _UpperCamelCase :str ,_UpperCamelCase :str ,_UpperCamelCase :int = 1_4 ): snake_case_ : str = int(_UpperCamelCase ,base=1_6 ) snake_case_ : str = int(_UpperCamelCase ,base=1_6 ) snake_case_ : Dict = primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(_UpperCamelCase ,_UpperCamelCase ): raise ValueError("""Invalid public key""" ) snake_case_ : Any = pow(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) return shaaaa(str(_UpperCamelCase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
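# Plain-integer walkthrough of the exchange the class above implements, using
# the RFC 3526 group-14 parameters. Assumption: the MODP table defined at the
# top of this file is bound to the name ``primes``, as the class body expects.
from secrets import randbelow

prime = primes[14]["prime"]
generator = primes[14]["generator"]

alice_private = randbelow(prime - 3) + 2  # private exponent in [2, prime - 2]
bob_private = randbelow(prime - 3) + 2
alice_public = pow(generator, alice_private, prime)  # g^a mod p
bob_public = pow(generator, bob_private, prime)  # g^b mod p

# Both sides derive the same secret: (g^b)^a == (g^a)^b (mod p).
assert pow(bob_public, alice_private, prime) == pow(alice_public, bob_private, prime)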
8
'''simple docstring'''

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
    lowercase : Dict = StableDiffusionInpaintPipeline
    lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowercase : Dict = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowercase : Optional[int] = frozenset([] )

    def a__ ( self :Any ):
        torch.manual_seed(0 )
        snake_case_ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,)
        snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
        torch.manual_seed(0 )
        snake_case_ : List[str] = AutoencoderKL(
            block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,)
        torch.manual_seed(0 )
        snake_case_ : Optional[int] = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,)
        snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase )
        snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        snake_case_ : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
        snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
        if str(_UpperCamelCase ).startswith("""mps""" ):
            snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase )
        else:
            snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
        snake_case_ : int = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": init_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def a__ ( self :Any ):
        snake_case_ : Union[str, Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : Optional[Any] = self.get_dummy_components()
        snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase )
        snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase )
        sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )

        snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase )
        snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images
        snake_case_ : List[Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def a__ ( self :Any ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def a__ ( self :List[Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self :Tuple ):
        snake_case_ : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        snake_case_ : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""" )

        snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase )
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing()

        snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""

        snake_case_ : List[str] = torch.manual_seed(0 )
        snake_case_ : Dict = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
        snake_case_ : Union[str, Any] = output.images[0]

        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 9E-3

    def a__ ( self :Tuple ):
        snake_case_ : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        snake_case_ : List[str] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )

        snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,)
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing()

        snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""

        snake_case_ : List[Any] = torch.manual_seed(0 )
        snake_case_ : Any = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
        snake_case_ : List[str] = output.images[0]

        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def a__ ( self :Union[str, Any] ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        snake_case_ : Optional[int] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )

        snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" )
        snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,)
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""

        snake_case_ : Optional[int] = torch.manual_seed(0 )
        snake_case_ : Tuple = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,)

        snake_case_ : Any = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
8
1
'''simple docstring'''

import argparse

import torch
from transformers import (
    SpeechTaConfig,
    SpeechTaFeatureExtractor,
    SpeechTaForSpeechToSpeech,
    SpeechTaForSpeechToText,
    SpeechTaForTextToSpeech,
    SpeechTaProcessor,
    SpeechTaTokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
__A : List[str] = logging.get_logger('transformers.models.speecht5')

__A : Any = {
    'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
    'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
    'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
    'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
__A : Dict = {
    'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
    'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
__A : int = {
    'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
    'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
    'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
    'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
    'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
__A : List[str] = {
    'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
    'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
    'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
    'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
    'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
    'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
    'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
    'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
    'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
    'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
    'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
    'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
__A : List[str] = {
    'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
__A : Any = {
    'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
__A : Any = {
    'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
    'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
    'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
    'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
    'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
    'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
    'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
    'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
    'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
__A : Any = {
    'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
    'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
    'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
    'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
    'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
    'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
    'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
    'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
    'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
    'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
    'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
    'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
    'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
__A : Optional[Any] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
__A : Any = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Union[str, Any] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Union[str, Any] = []
__A : Union[str, Any] = [
    'encoder.version',
    'encoder.layers.*.norm_k.weight',
    'encoder.layers.*.norm_k.bias',
    'decoder.version',
    'decoder.layers.*.norm_k.weight',
    'decoder.layers.*.norm_k.bias',
    'decoder.pos_emb.pe_k',
    'speech_encoder_prenet.embed_positions._float_tensor',
    'text_decoder_prenet.embed_positions._float_tensor',
]
__A : Any = IGNORE_KEYS + [
    'encoder.proj',
    'text_encoder_prenet.*',
    'speech_decoder_prenet.*',
    'speech_decoder_postnet.*',
]
__A : List[Any] = IGNORE_KEYS + [
    'encoder.proj',
    'speech_encoder_prenet.*',
    'text_decoder_prenet.*',
    'text_decoder_postnet.*',
]
__A : str = IGNORE_KEYS + [
    'encoder.proj',
    'text_encoder_prenet.*',
    'text_decoder_prenet.*',
    'text_decoder_postnet.*',
]


def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        snake_case_ : Any = getattr(lowerCamelCase_ , lowerCamelCase_ )

    if weight_type is not None:
        snake_case_ : Dict = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
    else:
        snake_case_ : List[str] = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        snake_case_ : Tuple = value
    elif weight_type == "weight_g":
        snake_case_ : Optional[int] = value
    elif weight_type == "weight_v":
        snake_case_ : str = value
    elif weight_type == "bias":
        snake_case_ : Optional[Any] = value
    elif weight_type == "running_mean":
        snake_case_ : List[Any] = value
    elif weight_type == "running_var":
        snake_case_ : Any = value
    elif weight_type == "num_batches_tracked":
        snake_case_ : str = value
    else:
        snake_case_ : Any = value

    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )


def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] ):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            snake_case_ , snake_case_ : List[str] = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :Dict , lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : str = []
    if task == "s2t":
        snake_case_ : Union[str, Any] = hf_model.speechta.encoder.prenet.feature_encoder
        snake_case_ : int = MAPPING_S2T
        snake_case_ : Dict = IGNORE_KEYS_S2T
    elif task == "t2s":
        snake_case_ : List[str] = None
        snake_case_ : int = MAPPING_T2S
        snake_case_ : List[str] = IGNORE_KEYS_T2S
    elif task == "s2s":
        snake_case_ : Dict = hf_model.speechta.encoder.prenet.feature_encoder
        snake_case_ : Union[str, Any] = MAPPING_S2S
        snake_case_ : Union[str, Any] = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )

    for name, value in fairseq_dict.items():
        if should_ignore(lowerCamelCase_ , lowerCamelCase_ ):
            logger.info(F'''{name} was ignored''' )
            continue

        snake_case_ : str = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == """group""" , )
            snake_case_ : List[Any] = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    snake_case_ , snake_case_ : int = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        snake_case_ : Tuple = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    snake_case_ : Optional[int] = True
                    if "*" in mapped_key:
                        snake_case_ : str = name.split(lowerCamelCase_ )[0].split(""".""" )[-2]
                        snake_case_ : List[Any] = mapped_key.replace("""*""" , lowerCamelCase_ )
                    if "weight_g" in name:
                        snake_case_ : Optional[Any] = """weight_g"""
                    elif "weight_v" in name:
                        snake_case_ : List[Any] = """weight_v"""
                    elif "bias" in name:
                        snake_case_ : Optional[int] = """bias"""
                    elif "weight" in name:
                        snake_case_ : Optional[int] = """weight"""
                    elif "running_mean" in name:
                        snake_case_ : List[str] = """running_mean"""
                    elif "running_var" in name:
                        snake_case_ : Union[str, Any] = """running_var"""
                    elif "num_batches_tracked" in name:
                        snake_case_ : Union[str, Any] = """num_batches_tracked"""
                    else:
                        snake_case_ : Dict = None
                    set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                continue
        if not is_used:
            unused_weights.append(lowerCamelCase_ )

    logger.warning(F'''Unused weights: {unused_weights}''' )


def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] ):
    '''simple docstring'''
    snake_case_ : Dict = full_name.split("""conv_layers.""" )[-1]
    snake_case_ : Dict = name.split(""".""" )
    snake_case_ : List[Any] = int(items[0] )
    snake_case_ : List[Any] = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            snake_case_ : Any = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            snake_case_ : Union[str, Any] = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            snake_case_ : str = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            snake_case_ : Union[str, Any] = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(lowerCamelCase_ )


@torch.no_grad()
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , lowerCamelCase_ :Any=None , lowerCamelCase_ :Optional[Any]=None , ):
    '''simple docstring'''
    if config_path is not None:
        snake_case_ : Tuple = SpeechTaConfig.from_pretrained(lowerCamelCase_ )
    else:
        snake_case_ : Any = SpeechTaConfig()

    if task == "s2t":
        snake_case_ : Optional[Any] = config.max_text_positions
        snake_case_ : Union[str, Any] = SpeechTaForSpeechToText(lowerCamelCase_ )
    elif task == "t2s":
        snake_case_ : Union[str, Any] = 18_76
        snake_case_ : Tuple = 6_00
        snake_case_ : Union[str, Any] = config.max_speech_positions
        snake_case_ : Any = SpeechTaForTextToSpeech(lowerCamelCase_ )
    elif task == "s2s":
        snake_case_ : Optional[int] = 18_76
        snake_case_ : int = config.max_speech_positions
        snake_case_ : Any = SpeechTaForSpeechToSpeech(lowerCamelCase_ )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )

    if vocab_path:
        snake_case_ : int = SpeechTaTokenizer(lowerCamelCase_ , model_max_length=config.max_text_positions )

        # Mask token behaves like a normal word, i.e. include the space before it
        snake_case_ : Dict = AddedToken("""<mask>""" , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
        snake_case_ : Optional[int] = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )

    snake_case_ : List[str] = SpeechTaFeatureExtractor()
    snake_case_ : Tuple = SpeechTaProcessor(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ )
    processor.save_pretrained(lowerCamelCase_ )

    snake_case_ : Tuple = torch.load(lowerCamelCase_ )
    recursively_load_weights(fairseq_checkpoint["""model"""] , lowerCamelCase_ , lowerCamelCase_ )

    model.save_pretrained(lowerCamelCase_ )

    if repo_id:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(lowerCamelCase_ )
        model.push_to_hub(lowerCamelCase_ )


if __name__ == "__main__":
    __A : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--task',
        default='s2t',
        type=str,
        help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
    )
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )

    __A : Any = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
8
'''simple docstring'''

import collections
import os
import re
from pathlib import Path


__A : Dict = 'src/transformers'

# Matches is_xxx_available()
__A : Dict = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__A : int = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__A : List[Any] = re.compile(r'^\s*try:')
# Catches a line with else:
__A : Any = re.compile(r'^\s*else:')


def UpperCAmelCase ( lowerCamelCase_ :str ):
    '''simple docstring'''
    if _re_test_backend.search(lowerCamelCase_ ) is None:
        return None
    snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )]
    backends.sort()
    return "_and_".join(lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ):
    '''simple docstring'''
    with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        snake_case_ : str = f.readlines()

    snake_case_ : List[Any] = 0
    while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lowerCamelCase_ ):
        return None

    # First grab the objects without a specific backend in _import_structure
    snake_case_ : Union[str, Any] = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
        snake_case_ : str = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(lowerCamelCase_ ):
            snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0]
            snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
            line_index += 1
            continue
        snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ )
        if single_line_import_search is not None:
            snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0]
            objects.extend(lowerCamelCase_ )
        elif line.startswith(""" """ * 8 + """\"""" ):
            objects.append(line[9:-3] )
        line_index += 1

    snake_case_ : Union[str, Any] = {"""none""": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        snake_case_ : List[str] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            snake_case_ : Tuple = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            snake_case_ : Dict = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
                snake_case_ : List[Any] = lines[line_index]
                if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None:
                    objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] )
                elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None:
                    snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ )
                    snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
                    objects.extend(lowerCamelCase_ )
                elif _re_between_brackets.search(lowerCamelCase_ ) is not None:
                    snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ )
                    snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
                    objects.extend(lowerCamelCase_ )
                elif _re_quote_object.search(lowerCamelCase_ ) is not None:
                    objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] )
                elif line.startswith(""" """ * 8 + """\"""" ):
                    objects.append(line[9:-3] )
                elif line.startswith(""" """ * 12 + """\"""" ):
                    objects.append(line[13:-3] )
                line_index += 1

            snake_case_ : int = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    snake_case_ : List[Any] = []
    while (
        line_index < len(lowerCamelCase_ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("""else""" )
    ):
        snake_case_ : Union[str, Any] = lines[line_index]
        snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
        elif line.startswith(""" """ * 8 ):
            objects.append(line[8:-2] )
        line_index += 1

    snake_case_ : Dict = {"""none""": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lowerCamelCase_ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        snake_case_ : Optional[Any] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            snake_case_ : str = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            snake_case_ : Any = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
                snake_case_ : Dict = lines[line_index]
                snake_case_ : Any = _re_import.search(lowerCamelCase_ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1

            snake_case_ : int = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ):
    '''simple docstring'''

    def find_duplicates(lowerCamelCase_ :Union[str, Any] ):
        return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]

    snake_case_ : Optional[int] = []
    for key in import_dict_objects.keys():
        snake_case_ : int = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        snake_case_ : List[str] = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )

        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
    return errors


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : Tuple = []
    for root, _, files in os.walk(lowerCamelCase_ ):
        if "__init__.py" in files:
            snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" )
            snake_case_ : Dict = parse_init(lowerCamelCase_ )
            if objects is not None:
                snake_case_ : Any = analyze_results(*lowerCamelCase_ )
                if len(lowerCamelCase_ ) > 0:
                    snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("""\n""".join(lowerCamelCase_ ) )
    if len(lowerCamelCase_ ) > 0:
        raise ValueError("""\n\n""".join(lowerCamelCase_ ) )


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : Union[str, Any] = []
    for path, directories, files in os.walk(lowerCamelCase_ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(lowerCamelCase_ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) )
            snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" )
            submodules.append(lowerCamelCase_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            snake_case_ : Dict = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) )
            snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(lowerCamelCase_ )
    return submodules


__A : List[Any] = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
    'models.esm.openfold_utils',
]


def UpperCAmelCase ( ):
    '''simple docstring'''
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ )

    snake_case_ : List[str] = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f:
        snake_case_ : str = f.read()
    import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) )

    snake_case_ : Dict = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(lowerCamelCase_ ) > 0:
        snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
            F'''{list_of_modules}\n'''
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
8
1
'''simple docstring'''

import json
import os

import torch

from diffusers import UNetaDModel


os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)

os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)


def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    if hor == 1_28:
        snake_case_ : Tuple = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        snake_case_ : Tuple = (32, 1_28, 2_56)
        snake_case_ : Dict = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
    elif hor == 32:
        snake_case_ : str = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        snake_case_ : Union[str, Any] = (32, 64, 1_28, 2_56)
        snake_case_ : str = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
    snake_case_ : Dict = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    snake_case_ : str = model.state_dict()
    snake_case_ : int = {
        """down_block_types""": down_block_types,
        """block_out_channels""": block_out_channels,
        """up_block_types""": up_block_types,
        """layers_per_block""": 1,
        """use_timestep_embedding""": True,
        """out_block_type""": """OutConv1DBlock""",
        """norm_num_groups""": 8,
        """downsample_each_block""": False,
        """in_channels""": 14,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """sample_size""": 6_55_36,
        """mid_block_type""": """MidResTemporalBlock1D""",
        """act_fn""": """mish""",
    }
    snake_case_ : Optional[Any] = UNetaDModel(**lowerCamelCase_ )
    print(F'''length of state dict: {len(state_dict.keys() )}''' )
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    snake_case_ : str = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        snake_case_ : List[Any] = state_dict.pop(lowerCamelCase_ )
    hf_value_function.load_state_dict(lowerCamelCase_ )

    torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
        json.dump(lowerCamelCase_ , lowerCamelCase_ )


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : str = {
        """in_channels""": 14,
        """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
        """up_block_types""": (),
        """out_block_type""": """ValueFunction""",
        """mid_block_type""": """ValueFunctionMidBlock1D""",
        """block_out_channels""": (32, 64, 1_28, 2_56),
        """layers_per_block""": 1,
        """downsample_each_block""": True,
        """sample_size""": 6_55_36,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """use_timestep_embedding""": True,
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """norm_num_groups""": 8,
        """act_fn""": """mish""",
    }

    snake_case_ : int = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
    snake_case_ : int = model
    snake_case_ : Dict = UNetaDModel(**lowerCamelCase_ )
    print(F'''length of state dict: {len(state_dict.keys() )}''' )
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )

    snake_case_ : Optional[int] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        snake_case_ : List[str] = state_dict.pop(lowerCamelCase_ )

    hf_value_function.load_state_dict(lowerCamelCase_ )

    torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
    with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(lowerCamelCase_ , lowerCamelCase_ )


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
8
'''simple docstring'''

import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class __UpperCamelCase ( unittest.TestCase ):
    def __init__( self :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Any=1_8 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :List[str]=4_0_0 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=True ,):
        snake_case_ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        snake_case_ : Union[str, Any] = parent
        snake_case_ : str = batch_size
        snake_case_ : List[Any] = num_channels
        snake_case_ : Tuple = image_size
        snake_case_ : int = min_resolution
        snake_case_ : int = max_resolution
        snake_case_ : Union[str, Any] = do_resize
        snake_case_ : Optional[Any] = size
        snake_case_ : Any = apply_ocr

    def a__ ( self :Union[str, Any] ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
    lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def a__ ( self :List[Any] ):
        snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self )

    @property
    def a__ ( self :int ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def a__ ( self :Any ):
        snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) )
        self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) )

    def a__ ( self :int ):
        snake_case_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} )

        snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 )
        self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} )

    def a__ ( self :Optional[Any] ):
        pass

    def a__ ( self :Union[str, Any] ):
        # Initialize image_processing
        snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase ,Image.Image )

        # Test not batched input
        snake_case_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" )
        self.assertEqual(
            encoding.pixel_values.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

        self.assertIsInstance(encoding.words ,_UpperCamelCase )
        self.assertIsInstance(encoding.boxes ,_UpperCamelCase )

        # Test batched
        snake_case_ : List[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

    def a__ ( self :Tuple ):
        # Initialize image_processing
        snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase ,np.ndarray )

        # Test not batched input
        snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

        # Test batched
        snake_case_ : Any = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

    def a__ ( self :Optional[Any] ):
        # Initialize image_processing
        snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase ,torch.Tensor )

        # Test not batched input
        snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

        # Test batched
        snake_case_ : Union[str, Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

    def a__ ( self :List[Any] ):
        # with apply_OCR = True
        snake_case_ : Any = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        snake_case_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" )

        snake_case_ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" )

        snake_case_ : Dict = image_processing(_UpperCamelCase ,return_tensors="""pt""" )

        self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
        self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]]  # noqa: E231
        snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words ,_UpperCamelCase )
        self.assertListEqual(encoding.boxes ,_UpperCamelCase )

        # with apply_OCR = False
        snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase )

        snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" )

        self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
8
1
'''simple docstring'''

from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

__A : Union[str, Any] = TypeVar('T')


class __UpperCamelCase ( Generic[T] ):
    lowercase : deque[T]  # Cache store of keys
    lowercase : set[T]  # References of the keys in cache
    lowercase : int = 1_0  # Maximum capacity of cache

    def __init__( self :int ,_UpperCamelCase :int ):
        snake_case_ : List[str] = deque()
        snake_case_ : Union[str, Any] = set()
        if not n:
            snake_case_ : List[str] = sys.maxsize
        elif n < 0:
            raise ValueError("""n should be an integer greater than 0.""" )
        else:
            snake_case_ : Tuple = n

    def a__ ( self :int ,_UpperCamelCase :T ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                snake_case_ : Union[str, Any] = self.dq_store.pop()
                self.key_reference.remove(_UpperCamelCase )
        else:
            self.dq_store.remove(_UpperCamelCase )

        self.dq_store.appendleft(_UpperCamelCase )
        self.key_reference.add(_UpperCamelCase )

    def a__ ( self :Optional[int] ):
        for k in self.dq_store:
            print(_UpperCamelCase )

    def __repr__( self :Any ):
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    __A : LRUCache[str | int] = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
8
'''simple docstring'''

def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    snake_case_ : List[Any] = generate_pascal_triangle(lowerCamelCase_ )
    for row_idx in range(lowerCamelCase_ ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=""" """ )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=""" """ )
            else:
                print(triangle[row_idx][col_idx] , end="""""" )
        print()


def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )

    snake_case_ : list[list[int]] = []
    for current_row_idx in range(lowerCamelCase_ ):
        snake_case_ : List[str] = populate_current_row(lowerCamelCase_ , lowerCamelCase_ )
        triangle.append(lowerCamelCase_ )
    return triangle


def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int ):
    '''simple docstring'''
    snake_case_ : Union[str, Any] = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    snake_case_ , snake_case_ : Optional[Any] = 1, 1
    for current_col_idx in range(1 , lowerCamelCase_ ):
        calculate_current_element(
            lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    return current_row


def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :list[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ):
    '''simple docstring'''
    snake_case_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1]
    snake_case_ : List[Any] = triangle[current_row_idx - 1][current_col_idx]
    snake_case_ : Optional[int] = above_to_left_elt + above_to_right_elt


def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )

    snake_case_ : list[list[int]] = [[1]]

    for row_index in range(1 , lowerCamelCase_ ):
        snake_case_ : Optional[Any] = [0] + result[-1] + [0]
        snake_case_ : Dict = row_index + 1
        # Calculate the number of distinct elements in a row
        snake_case_ : Any = sum(divmod(lowerCamelCase_ , 2 ) )
        snake_case_ : Tuple = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        snake_case_ : Optional[int] = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        snake_case_ : str = row_first_half + row_second_half
        result.append(lowerCamelCase_ )

    return result


def UpperCAmelCase ( ):
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(lowerCamelCase_ :Callable , lowerCamelCase_ :int ) -> None:
        snake_case_ : Dict = F'''{func.__name__}({value})'''
        snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="""import __main__""" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'''{call:38} -- {timing:.4f} seconds''' )

    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(lowerCamelCase_ , lowerCamelCase_ )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
8
1
'''simple docstring'''

from __future__ import annotations

from collections.abc import Callable


def UpperCAmelCase ( lowerCamelCase_ :Callable[[int | float], int | float] , lowerCamelCase_ :int | float , lowerCamelCase_ :int | float , lowerCamelCase_ :int = 1_00 , ):
    '''simple docstring'''
    snake_case_ : Tuple = x_start
    snake_case_ : Optional[int] = fnc(lowerCamelCase_ )
    snake_case_ : Optional[int] = 0.0

    for _ in range(lowerCamelCase_ ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        snake_case_ : int = (x_end - x_start) / steps + xa
        snake_case_ : Union[str, Any] = fnc(lowerCamelCase_ )
        area += abs(fxa + fxa ) * (xa - xa) / 2
        # Increment step
        snake_case_ : Any = xa
        snake_case_ : str = fxa
    return area


if __name__ == "__main__":

    def UpperCAmelCase ( lowerCamelCase_ :Any ):
        '''simple docstring'''
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    __A : List[str] = 10
    while i <= 100_000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
8
'''simple docstring'''

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    @slow
    def a__ ( self :Dict ):
        snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house

        snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) )  # batch_size, sequence_length, embedding_vector_dim
        snake_case_ : Dict = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )

        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,_UpperCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )

    @slow
    def a__ ( self :Union[str, Any] ):
        snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house

        snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) )  # batch_size, sequence_length, embedding_vector_dim
        snake_case_ : Any = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )

        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,_UpperCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
8
1
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __A : int = logging.get_logger(__name__) def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] ): '''simple docstring''' snake_case_ : Any = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) snake_case_ : Optional[int] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase_ ) if matches: snake_case_ : Dict = float(matches[1] ) snake_case_ : List[str] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". snake_case_ : Optional[Any] = 10_01 snake_case_ : Optional[Any] = """imagenet-1k-id2label.json""" snake_case_ : List[str] = """huggingface/label-files""" snake_case_ : str = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) snake_case_ : List[str] = {int(lowerCamelCase_ ) + 1: v for k, v in idalabel.items()} snake_case_ : Union[str, Any] = """background""" snake_case_ : Dict = idalabel snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ : Any = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ) return im @torch.no_grad() def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[Any]=False ): '''simple docstring''' snake_case_ : str = get_mobilenet_va_config(lowerCamelCase_ ) # Load 🤗 model snake_case_ : List[Any] = MobileNetVaForImageClassification(lowerCamelCase_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor snake_case_ : Tuple = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) snake_case_ : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ : Optional[Any] = model(**lowerCamelCase_ ) snake_case_ : List[Any] = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": snake_case_ : Optional[Any] = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": snake_case_ : Optional[int] = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: snake_case_ : List[Any] = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print("""Pushing to the hub...""" ) snake_case_ : Optional[Any] = """google/""" + model_name image_processor.push_to_hub(lowerCamelCase_ ) model.push_to_hub(lowerCamelCase_ ) if __name__ == 
"__main__": __A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __A : int = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
8
'''simple docstring'''

from __future__ import annotations

from collections.abc import Callable


def UpperCAmelCase ( lowerCamelCase_ :Callable[[int | float], int | float] , lowerCamelCase_ :int | float , lowerCamelCase_ :int | float , lowerCamelCase_ :int = 1_00 , ):
    '''simple docstring'''
    snake_case_ : Tuple = x_start
    snake_case_ : Optional[int] = fnc(lowerCamelCase_ )
    snake_case_ : Optional[int] = 0.0
    for _ in range(lowerCamelCase_ ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        snake_case_ : int = (x_end - x_start) / steps + xa
        snake_case_ : Union[str, Any] = fnc(lowerCamelCase_ )
        area += abs(fxa + fxa ) * (xa - xa) / 2
        # Increment step
        snake_case_ : Any = xa
        snake_case_ : str = fxa
    return area


if __name__ == "__main__":

    def UpperCAmelCase ( lowerCamelCase_ :Any ):
        '''simple docstring'''
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    __A : List[str] = 10
    while i <= 100_000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
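
# A minimal, self-contained sketch of the same trapezoidal rule with readable names
# (all identifiers below are illustrative, not taken from the file above). For
# f(x) = x**2 on [0, 1] the exact integral is 1/3, and the approximation converges
# as the step count grows.


def trapezoidal_area_sketch(fnc, x_start, x_end, steps=100):
    """Approximate the area between fnc and the x axis on [x_start, x_end]."""
    xa, fxa = x_start, fnc(x_start)
    area = 0.0
    for _ in range(steps):
        xb = (x_end - x_start) / steps + xa  # right edge of the current segment
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2  # trapezoid: mean height times width
        xa, fxa = xb, fxb
    return area


assert abs(trapezoidal_area_sketch(lambda x: x**2, 0, 1, 10_000) - 1 / 3) < 1e-6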
8
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


__A : Any = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : str = ['DPTFeatureExtractor']
    __A : Any = ['DPTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Dict = [
        'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DPTForDepthEstimation',
        'DPTForSemanticSegmentation',
        'DPTModel',
        'DPTPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) __A : int = logging.getLogger() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) snake_case_ : int = parser.parse_args() return args.f def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[Any] = {} snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" ) if os.path.exists(lowerCamelCase_ ): with open(lowerCamelCase_ , """r""" ) as f: snake_case_ : str = json.load(lowerCamelCase_ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda""" return is_using_cuda and is_apex_available() __A : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __UpperCamelCase ( lowercase__ ): @classmethod def a__ ( cls :Dict ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU snake_case_ : Optional[int] = tempfile.mkdtemp() snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" ) write_basic_config(save_location=cls.configPath ) snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def a__ ( cls :int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Optional[int] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : Dict = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2 snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : str = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertLess(result["""train_loss"""] ,0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) ) @unittest.skip(reason="""Fix me @muellerzr""" ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[str] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : Optional[int] = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 ) self.assertGreaterEqual(result["""eval_exact"""] ,2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Union[str, Any] = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Union[str, Any] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[Any] = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : int = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 ) self.assertGreaterEqual(result["""eval_rouge2"""] ,2 ) self.assertGreaterEqual(result["""eval_rougeL"""] ,7 ) self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : Tuple = self.get_auto_remove_tmp_dir() snake_case_ : Optional[Any] = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Any = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) ) @slow def a__ ( self :Optional[Any] ): snake_case_ : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCamelCase ) snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Any ): snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
8
1
'''simple docstring'''

import json
import sys


def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :str ):
    '''simple docstring'''
    with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
        snake_case_ : Union[str, Any] = json.load(lowerCamelCase_ )

    snake_case_ : List[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]

    for benchmark_name in sorted(lowerCamelCase_ ):
        snake_case_ : Dict = results[benchmark_name]
        snake_case_ : List[Any] = benchmark_name.split("""/""" )[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''' )

        snake_case_ : List[Any] = """| metric |"""
        snake_case_ : str = """|--------|"""
        snake_case_ : Tuple = """| new / old (diff) |"""
        for metric_name in sorted(lowerCamelCase_ ):
            snake_case_ : Optional[int] = benchmark_res[metric_name]
            snake_case_ : Any = metric_vals["""new"""]
            snake_case_ : Union[str, Any] = metric_vals.get("""old""" , lowerCamelCase_ )
            snake_case_ : int = metric_vals.get("""diff""" , lowerCamelCase_ )

            snake_case_ : Union[str, Any] = F''' {new_val:f}''' if isinstance(lowerCamelCase_ , (int, float) ) else """None"""
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase_ , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase_ , (int, float) ) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("""</details>""" )

    with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
        f.writelines("""\n""".join(lowerCamelCase_ ) )


if __name__ == "__main__":
    __A : Dict = sys.argv[1]
    __A : Union[str, Any] = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
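
# A small runnable sketch of the cell format the function above emits: each metric
# cell is rendered as "new / old (diff)", with non-numeric entries falling back to
# "None". The sample numbers are made up for illustration.

new_val, old_val, dif_val = 0.123, 0.456, -0.333
val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
if old_val is not None:
    val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
if dif_val is not None:
    val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
assert val_str == " 0.123000 / 0.456000 (-0.333000)"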
8
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A : Tuple = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : str = ['input_values', 'padding_mask'] def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,): super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = chunk_length_s snake_case_ : str = overlap @property def a__ ( self :Any ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def a__ ( self :List[str] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. 
Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs snake_case_ : Tuple = True snake_case_ : str = bool( isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ): snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa ) elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): snake_case_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_UpperCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) snake_case_ : Tuple = None snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio ) snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) ) snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: snake_case_ : Any = max(array.shape[0] for array in raw_audio ) snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) ) snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length snake_case_ : Union[str, Any] = """max_length""" else: snake_case_ : int = input_values # normal padding on batch if padded_inputs is None: snake_case_ : Optional[int] = self.pad( _UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,) if padding: snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" ) snake_case_ : Optional[int] = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: snake_case_ : Dict = example[..., None] input_values.append(example.T ) snake_case_ : List[Any] = input_values if return_tensors is not None: snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase ) return padded_inputs
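
# A worked example of the chunking arithmetic in the feature extractor above
# (the concrete numbers are illustrative). With chunk_length_s = 1.0,
# sampling_rate = 24_000 and overlap = 0.5, consecutive chunks overlap by half a
# chunk, and the padded length snaps to the stride grid.

import math

sampling_rate, chunk_length_s, overlap = 24_000, 1.0, 0.5
chunk_length = int(chunk_length_s * sampling_rate)          # 24_000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 12_000 samples between chunk starts

raw_length = 30_000                                         # a 1.25 s mono clip
nb_step = math.ceil(raw_length / chunk_stride)              # 3 strides needed when padding
padded_length = (nb_step - 1) * chunk_stride + chunk_length
assert padded_length == 48_000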
8
1
'''simple docstring'''

from datetime import datetime

import matplotlib.pyplot as plt
import torch


def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] ):
    '''simple docstring'''
    for param in module.parameters():
        snake_case_ : str = False


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : str = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        snake_case_ : List[Any] = """mps"""
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device


def UpperCAmelCase ( lowerCamelCase_ :Any ):
    '''simple docstring'''
    snake_case_ : Any = plt.imshow(lowerCamelCase_ )
    fig.axes.get_xaxis().set_visible(lowerCamelCase_ )
    fig.axes.get_yaxis().set_visible(lowerCamelCase_ )
    plt.show()


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : Optional[int] = datetime.now()
    snake_case_ : int = current_time.strftime("""%H:%M:%S""" )
    return timestamp
8
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __A : Dict = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = 'ernie_m' lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self :Optional[Any] ,_UpperCamelCase :int = 2_5_0_0_0_2 ,_UpperCamelCase :int = 7_6_8 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 3_0_7_2 ,_UpperCamelCase :str = "gelu" ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :int = 5_1_4 ,_UpperCamelCase :float = 0.02 ,_UpperCamelCase :int = 1 ,_UpperCamelCase :float = 1E-0_5 ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :List[str]=False ,_UpperCamelCase :Optional[int]=0.0 ,**_UpperCamelCase :List[Any] ,): super().__init__(pad_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Optional[int] = vocab_size snake_case_ : Any = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Any = hidden_act snake_case_ : Tuple = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : int = initializer_range snake_case_ : Optional[Any] = layer_norm_eps snake_case_ : Union[str, Any] = classifier_dropout snake_case_ : Tuple = is_decoder snake_case_ : int = act_dropout
8
1
'''simple docstring'''

import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):  # picklable for multiprocessing
    '''simple docstring'''
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def UpperCAmelCase ( ):
    '''simple docstring'''
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"

    snake_case_ : Optional[int] = [1, 2, 3]
    with pytest.raises(lowerCamelCase_ ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=2 )

    with pytest.raises(lowerCamelCase_ ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=-1 )


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] ):
    '''simple docstring'''
    snake_case_ : Optional[Any] = [1, 2]
    snake_case_ : str = {"""a""": 1, """b""": 2}
    snake_case_ : List[str] = {"""a""": [1, 2], """b""": [3, 4]}
    snake_case_ : Union[str, Any] = {"""a""": {"""1""": 1}, """b""": 2}
    snake_case_ : Union[str, Any] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    snake_case_ : Optional[int] = [2, 3]
    snake_case_ : Dict = {"""a""": 2, """b""": 3}
    snake_case_ : Optional[Any] = {"""a""": [2, 3], """b""": [4, 5]}
    snake_case_ : Dict = {"""a""": {"""1""": 2}, """b""": 3}
    snake_case_ : str = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}

    with parallel_backend("""spark""" ):
        assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
        assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
        assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
        assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
        assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
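
# A hedged usage sketch of the API exercised by the tests above: `parallel_backend`
# swaps the executor that `map_nested` dispatches to. Running it for real requires
# `datasets` plus the joblib-spark backend; without them the context manager raises.

from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested


def add_one(i):  # must be picklable, like the helper in the tests above
    return i + 1


with parallel_backend("spark"):
    assert map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2) == {"a": [2, 3], "b": [4, 5]}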
8
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __UpperCamelCase ( nn.Module ): def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Any = only_cross_attention snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase ) else: snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. snake_case_ : str = ( AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,) # is self-attn if encoder_hidden_states is none else: snake_case_ : Any = None snake_case_ : Optional[Any] = None # 3. 
Feed-forward snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase ) # let chunk size default to None snake_case_ : Optional[int] = None snake_case_ : Dict = 0 def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ): # Sets chunk feed-forward snake_case_ : Optional[Any] = chunk_size snake_case_ : Optional[Any] = dim def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,): # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype ) else: snake_case_ : Optional[int] = self.norma(_UpperCamelCase ) snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} snake_case_ : Union[str, Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output snake_case_ : Union[str, Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: snake_case_ : Any = ( self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase ) ) snake_case_ : List[Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Tuple = attn_output + hidden_states # 3. Feed-forward snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size snake_case_ : int = torch.cat( [self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,) else: snake_case_ : List[str] = self.ff(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output snake_case_ : Any = ff_output + hidden_states return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Tuple = int(dim * mult ) snake_case_ : Optional[int] = dim_out if dim_out is not None else dim if activation_fn == "gelu": snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase ) if activation_fn == "gelu-approximate": snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" ) elif activation_fn == "geglu": snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase ) elif activation_fn == "geglu-approximate": snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Dict = nn.ModuleList([] ) # project in self.net.append(_UpperCamelCase ) # project dropout self.net.append(nn.Dropout(_UpperCamelCase ) ) # project out self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(_UpperCamelCase ) ) def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ): for module in self.net: snake_case_ : Tuple = module(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ): super().__init__() snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Optional[Any] = approximate def a__ ( self :str ,_UpperCamelCase :int ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ,approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ): snake_case_ : Optional[Any] = self.proj(_UpperCamelCase ) snake_case_ : int = self.gelu(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 ) def a__ ( self :Dict ,_UpperCamelCase :List[str] ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ , snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 ) return hidden_states * self.gelu(_UpperCamelCase ) class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ): snake_case_ : 
int = self.proj(_UpperCamelCase ) return x * torch.sigmoid(1.7_02 * x ) class __UpperCamelCase ( nn.Module ): def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ): super().__init__() snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Union[str, Any] = nn.SiLU() snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 ) snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) ) snake_case_ , snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 ) snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift return x class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : int = nn.SiLU() snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase ) snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 ) def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = emb.chunk(6 ,dim=1 ) snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ): super().__init__() snake_case_ : Optional[int] = num_groups snake_case_ : List[Any] = eps if act_fn is None: snake_case_ : int = None else: snake_case_ : Dict = get_activation(_UpperCamelCase ) snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 ) def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ): if self.act: snake_case_ : Any = self.act(_UpperCamelCase ) snake_case_ : Optional[int] = self.linear(_UpperCamelCase ) snake_case_ : Dict = emb[:, :, None, None] snake_case_ , snake_case_ : str = emb.chunk(2 ,dim=1 ) snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps ) snake_case_ : List[str] = x * (1 + scale) + shift return x
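
# A minimal torch sketch of the chunked feed-forward trick used above: split the
# hidden states along one dimension, run the feed-forward on each slice, and
# concatenate. Because the linear layers act on the last dimension only, chunking
# any other dimension is lossless; it trades peak memory for extra kernel launches.

import torch
from torch import nn

ff = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))
hidden_states = torch.randn(2, 6, 8)  # (batch, seq, dim)

chunk_dim, chunk_size = 1, 2
assert hidden_states.shape[chunk_dim] % chunk_size == 0  # mirrors the divisibility check above
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
chunked = torch.cat([ff(piece) for piece in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim)
assert torch.allclose(chunked, ff(hidden_states), atol=1e-6)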
8
1
'''simple docstring'''

from itertools import product


def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int ):
    '''simple docstring'''
    snake_case_ : int = sides_number
    snake_case_ : List[Any] = max_face_number * dice_number
    snake_case_ : Union[str, Any] = [0] * (max_total + 1)

    snake_case_ : Optional[Any] = 1
    snake_case_ : Any = range(lowerCamelCase_ , max_face_number + 1 )
    for dice_numbers in product(lowerCamelCase_ , repeat=lowerCamelCase_ ):
        snake_case_ : Union[str, Any] = sum(lowerCamelCase_ )
        totals_frequencies[total] += 1

    return totals_frequencies


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : str = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    snake_case_ : List[str] = total_frequency_distribution(
        sides_number=6 , dice_number=6 )

    snake_case_ : Tuple = 0
    snake_case_ : Optional[Any] = 9
    snake_case_ : str = 4 * 9
    snake_case_ : Any = 6
    for peter_total in range(lowerCamelCase_ , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )

    snake_case_ : Union[str, Any] = (4**9) * (6**6)
    snake_case_ : Union[str, Any] = peter_wins_count / total_games_number

    snake_case_ : Union[str, Any] = round(lowerCamelCase_ , ndigits=7 )

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(F'{solution() = }')
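
# A quick sanity check of the frequency-distribution idea above on a case that is
# easy to verify by hand: two 6-sided dice produce 36 equally likely ordered rolls,
# and the totals histogram is the classic triangle 1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1.
# Names below are illustrative.

from itertools import product


def totals_histogram(sides, dice):
    freqs = [0] * (sides * dice + 1)
    for roll in product(range(1, sides + 1), repeat=dice):
        freqs[sum(roll)] += 1
    return freqs


hist = totals_histogram(6, 2)
assert sum(hist) == 6**2
assert hist[2:] == [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]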
8
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ): '''simple docstring''' snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} snake_case_ : Union[str, Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ): '''simple docstring''' snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,): super().__init__() snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" ) snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" ) snake_case_ : Optional[int] = self.get_char_lens(self.src_file ) snake_case_ : List[str] = max_source_length snake_case_ : str = max_target_length assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}''' snake_case_ : str = tokenizer snake_case_ : str = prefix if n_obs is not None: snake_case_ : int = self.src_lens[:n_obs] snake_case_ : Tuple = src_lang snake_case_ : str = tgt_lang def __len__( self :Any ): return len(self.src_lens ) def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ): snake_case_ : Optional[int] = index + 1 # linecache starts at 1 snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" ) snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" ) assert source_line, F'''empty source line for index {index}''' assert tgt_line, F'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right snake_case_ : int = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer ) snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" ) snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" ) snake_case_ : int = 
source_inputs["""input_ids"""].squeeze() snake_case_ : str = target_inputs["""input_ids"""].squeeze() snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( _UpperCamelCase :str ): return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()] def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ): snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] ) snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] ) snake_case_ : Optional[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase ) snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase ) snake_case_ : Optional[int] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __A : List[Any] = getLogger(__name__) def UpperCAmelCase ( lowerCamelCase_ :List[List] ): '''simple docstring''' return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : int = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): '''simple docstring''' with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) snake_case_ : List[str] = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ): '''simple docstring''' return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Dict ): '''simple docstring''' def remove_articles(lowerCamelCase_ :str ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ :Optional[Any] ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ :Tuple ): snake_case_ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ :Optional[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' 
snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) snake_case_ : Optional[Any] = sum(common.values() ) if num_same == 0: return 0 snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ): '''simple docstring''' assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) snake_case_ : Optional[int] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead snake_case_ : Optional[int] = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue snake_case_ : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
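
# A readable sketch of the token-level F1 computed above: both strings are
# normalized (lowercased, punctuation and articles stripped), then precision and
# recall come from the multiset intersection of their tokens. Names are illustrative.

import re
import string
from collections import Counter


def normalize(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


def token_f1(prediction, ground_truth):
    pred, gold = normalize(prediction).split(), normalize(ground_truth).split()
    num_same = sum((Counter(pred) & Counter(gold)).values())
    if num_same == 0:
        return 0.0
    precision, recall = num_same / len(pred), num_same / len(gold)
    return 2 * precision * recall / (precision + recall)


assert token_f1("The cat sat.", "a cat sat") == 1.0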
8
1
'''simple docstring'''

from math import pi


def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int ):
    '''simple docstring'''
    return 2 * pi * radius * (angle / 3_60)


if __name__ == "__main__":
    print(arc_length(90, 10))
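
# Worked example for the formula above: a 90 degree arc of a circle with radius 10
# is a quarter of the full circumference, i.e. 2 * pi * 10 / 4 = 5 * pi.

from math import isclose, pi

assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)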
8
'''simple docstring'''

import functools


def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : List[str] = len(lowerCamelCase_ )
    snake_case_ : Dict = len(lowerCamelCase_ )

    @functools.cache
    def min_distance(lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_worda - indexa
        # if second word index is overflow - delete all from the first word
        if indexa >= len_worda:
            return len_worda - indexa
        snake_case_ : Union[str, Any] = int(worda[indexa] != worda[indexa] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , lowerCamelCase_ ) ,
            1 + min_distance(lowerCamelCase_ , indexa + 1 ) ,
            diff + min_distance(indexa + 1 , indexa + 1 ) ,
        )

    return min_distance(0 , 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
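
# The memoized recursion above computes the classic Levenshtein distance; here is a
# self-contained version with readable (illustrative) names, checked against the
# well-known pair "kitten" -> "sitting", whose distance is 3.

import functools


def levenshtein(word_a, word_b):
    @functools.cache
    def go(i, j):
        if i >= len(word_a):
            return len(word_b) - j  # insert the rest of word_b
        if j >= len(word_b):
            return len(word_a) - i  # delete the rest of word_a
        diff = int(word_a[i] != word_b[j])
        return min(1 + go(i + 1, j), 1 + go(i, j + 1), diff + go(i + 1, j + 1))

    return go(0, 0)


assert levenshtein("kitten", "sitting") == 3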
8
1
'''simple docstring'''

def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    if a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
        raise TypeError("""Input value must be a 'int' type""" )
    return bin(lowerCamelCase_ ).count("""1""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
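
# Two equivalent ways to count set bits for non-negative integers: the
# bin(...).count("1") approach used above works everywhere, while int.bit_count()
# is the faster built-in on Python 3.10+.

n = 0b1011_0101
assert bin(n).count("1") == 5
assert n.bit_count() == 5  # requires Python >= 3.10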
8
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Any = tmp_path / """file.csv""" snake_case_ : Any = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[int] = tmp_path / """malformed_file.csv""" snake_case_ : int = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : str = tmp_path / """csv_with_image.csv""" snake_case_ : int = textwrap.dedent( F'''\ image {image_file} ''' ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : int = tmp_path / """csv_with_label.csv""" snake_case_ : Tuple = textwrap.dedent( """\ label good bad good """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv""" snake_case_ : str = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ): '''simple docstring''' snake_case_ : int = Csv() snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(lowerCamelCase_ ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase ( lowerCamelCase_ :Tuple ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : Tuple = f.read().splitlines()[1] snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] ) snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() snake_case_ : List[str] = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : List[Any] = f.read().splitlines()[1:] snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] ) snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() snake_case_ : 
Union[str, Any] = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(lowerCamelCase_ ) for label in labels] def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda lowerCamelCase_ : [int(lowerCamelCase_ ) for i in x.split()]} ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] ) snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) snake_case_ : Dict = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
8
1
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint __A : Tuple = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } __A : Dict = { '169M': 768, '430M': 1_024, '1B5': 2_048, '3B': 2_560, '7B': 4_096, '14B': 5_120, } def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] ): '''simple docstring''' snake_case_ : str = list(state_dict.keys() ) for name in state_dict_keys: snake_case_ : Tuple = state_dict.pop(lowerCamelCase_ ) # emb -> embedding if name.startswith("""emb.""" ): snake_case_ : Optional[Any] = name.replace("""emb.""" , """embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): snake_case_ : List[Any] = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" ) # att -> attention snake_case_ : Optional[int] = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , lowerCamelCase_ ) # ffn -> feed_forward snake_case_ : List[str] = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , lowerCamelCase_ ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): snake_case_ : int = name.replace(""".time_mix_k""" , """.time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): snake_case_ : Optional[Any] = name.replace(""".time_mix_v""" , """.time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): snake_case_ : List[str] = name.replace(""".time_mix_r""" , """.time_mix_receptance""" ) if name != "head.weight": snake_case_ : Union[str, Any] = """rwkv.""" + name snake_case_ : List[str] = weight return state_dict def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :str=None ): '''simple docstring''' # 1. If possible, build the tokenizer. if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) snake_case_ : Any = 5_02_77 snake_case_ : Dict = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: snake_case_ : List[Any] = PreTrainedTokenizerFast(tokenizer_file=lowerCamelCase_ ) snake_case_ : Tuple = len(lowerCamelCase_ ) tokenizer.save_pretrained(lowerCamelCase_ ) # 2. Build the config snake_case_ : Dict = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: snake_case_ : Union[str, Any] = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' ) snake_case_ : Dict = RwkvConfig( vocab_size=lowerCamelCase_ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(lowerCamelCase_ ) # 3. Download model file then convert state_dict snake_case_ : List[str] = hf_hub_download(lowerCamelCase_ , lowerCamelCase_ ) snake_case_ : Any = torch.load(lowerCamelCase_ , map_location="""cpu""" ) snake_case_ : Tuple = convert_state_dict(lowerCamelCase_ ) # 4. 
Split in shards and save snake_case_ , snake_case_ : str = shard_checkpoint(lowerCamelCase_ ) for shard_file, shard in shards.items(): torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) ) if index is not None: snake_case_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) # Save the index as well with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: snake_case_ : Union[str, Any] = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + """\n""" f.write(lowerCamelCase_ ) # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict) print( """Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.""" ) snake_case_ : Any = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: snake_case_ : Dict = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) snake_case_ : Any = AutoModelForCausalLM.from_pretrained(lowerCamelCase_ ) model.push_to_hub(lowerCamelCase_ , max_shard_size="""2GB""" ) tokenizer.push_to_hub(lowerCamelCase_ ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push the converted model to the Hub.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) __A : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
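The key-renaming pass at the heart of the script above is easiest to see in isolation. This standalone sketch applies the same stdlib-only regex rewrites to a toy key list (the keys are made up for illustration):

# Standalone demo of the regex-based state-dict key renaming used above.
import re

old_keys = ["emb.weight", "blocks.0.att.key.weight", "blocks.11.ffn.value.weight"]
new_keys = []
for name in old_keys:
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)     # att -> attention
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)  # ffn -> feed_forward
    new_keys.append(name)
assert new_keys == ["embeddings.weight", "blocks.0.attention.key.weight", "blocks.11.feed_forward.value.weight"]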
8
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ): '''simple docstring''' # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : Optional[Any] = np.asarray(weights[0] ) snake_case_ : int = np.asarray(weights[1] ) snake_case_ : Any = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : List[Any] = np.asarray(weights[0] ) snake_case_ : Optional[int] = np.asarray(weights[1] ) snake_case_ : Union[str, Any] = np.asarray(weights[2] ) snake_case_ : int = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' # layernorm 1 snake_case_ : str = weights[0][0][0] snake_case_ : int = np.asarray(layer_norm_a[0] ) snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # lsh weights + output snake_case_ : Tuple = weights[0][1] if len(lowerCamelCase_ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) else: set_layer_weights_in_torch_local(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) # intermediate weights snake_case_ : str = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase_ ) == 4: snake_case_ : List[Any] = intermediate_weights[2] # layernorm 2 snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] ) snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # intermediate dense snake_case_ : Any = 
np.asarray(intermediate_weights[1][0] ) snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) # intermediate out snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] ) snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ): '''simple docstring''' # reformer model snake_case_ : Dict = torch_model.reformer # word embeds snake_case_ : List[Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase_ ) , ) if isinstance(weights[3] , lowerCamelCase_ ): snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) ) snake_case_ : List[Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # output layer norm snake_case_ : Optional[Any] = np.asarray(weights[7][0] ) snake_case_ : List[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # output embeddings snake_case_ : Optional[int] = np.asarray(weights[9][0] ) snake_case_ : Any = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # Initialise PyTorch model snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ ) with open(lowerCamelCase_ , """rb""" ) as f: snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""] set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCamelCase_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) __A : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
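The recurring `transpose(1, 2).contiguous().view(-1, hidden_size)` idiom above folds per-head Trax attention weights into a single linear-layer matrix. A toy sketch of just that reshape, assuming the (num_heads, d_model, d_head) layout the transpose implies:

# Folding per-head weights (num_heads, d_model, d_head) into one
# (num_heads * d_head, d_model) matrix, as done for query/key/value above.
import torch

num_heads, d_model, d_head = 2, 8, 4
per_head = torch.randn(num_heads, d_model, d_head)
folded = per_head.transpose(1, 2).contiguous().view(-1, d_model)
assert folded.shape == (num_heads * d_head, d_model)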
8
1
'''simple docstring''' import functools def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[str] = len(lowerCamelCase_ ) snake_case_ : Dict = len(lowerCamelCase_ ) @functools.cache def min_distance(lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int: # if the first word index overflows - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if the second word index overflows - delete all from the first word if indexa >= len_worda: return len_worda - indexa snake_case_ : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase_ ) , 1 + min_distance(lowerCamelCase_ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
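The memoized recurrence above is the classic Levenshtein edit distance. A standalone restatement with readable names (illustrative, not the identifiers used in the file) makes the three moves — insert, delete, substitute — explicit:

# Levenshtein distance via the same functools.cache recurrence as above.
import functools

def edit_distance(word1: str, word2: str) -> int:
    @functools.cache
    def dist(i: int, j: int) -> int:
        if i >= len(word1):
            return len(word2) - j  # only insertions of word2's tail remain
        if j >= len(word2):
            return len(word1) - i  # only deletions of word1's tail remain
        diff = int(word1[i] != word2[j])  # substitution cost for this pair
        return min(1 + dist(i + 1, j), 1 + dist(i, j + 1), diff + dist(i + 1, j + 1))
    return dist(0, 0)

assert edit_distance("kitten", "sitting") == 3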
8
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class __UpperCamelCase ( lowercase__ ): lowercase : List[Any] = 'canine' def __init__( self :Optional[int] ,_UpperCamelCase :Dict=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :int=1_2 ,_UpperCamelCase :int=3_0_7_2 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=1_6_3_8_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :List[str]=0.02 ,_UpperCamelCase :Any=1E-1_2 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :List[str]=0xE_0_0_0 ,_UpperCamelCase :Optional[Any]=0xE_0_0_1 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :str=8 ,_UpperCamelCase :int=1_6_3_8_4 ,_UpperCamelCase :int=1_2_8 ,**_UpperCamelCase :str ,): super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : List[str] = max_position_embeddings snake_case_ : Union[str, Any] = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : Tuple = intermediate_size snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Optional[Any] = initializer_range snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[str] = layer_norm_eps # Character config: snake_case_ : Any = downsampling_rate snake_case_ : List[str] = upsampling_kernel_size snake_case_ : int = num_hash_functions snake_case_ : Tuple = num_hash_buckets snake_case_ : Tuple = local_transformer_stride
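In upstream transformers this configuration is exposed as `CanineConfig`; instantiating it with the defaults shown in the signature above gives a quick way to inspect the character-level hyperparameters (class name per the upstream library, usage illustrative):

# Illustrative usage, assuming the upstream transformers name CanineConfig.
from transformers import CanineConfig

config = CanineConfig()
print(config.downsampling_rate)  # 4, per the signature defaults above
print(config.num_hash_buckets)   # 16384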
8
1
'''simple docstring''' import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def UpperCAmelCase ( lowerCamelCase_ :int = 8 ): '''simple docstring''' snake_case_ : List[Any] = ascii_letters + digits + punctuation return "".join(secrets.choice(lowerCamelCase_ ) for _ in range(lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :int ): '''simple docstring''' # Password generator: combines the random_number, random_letters, and # random_character functions # Put your code here... i -= len(lowerCamelCase_ ) snake_case_ : Optional[Any] = i // 3 snake_case_ : Union[str, Any] = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) snake_case_ : Optional[int] = ( chars_incl + random(lowerCamelCase_ , quotient + remainder ) + random(lowerCamelCase_ , lowerCamelCase_ ) + random(lowerCamelCase_ , lowerCamelCase_ ) ) snake_case_ : Union[str, Any] = list(lowerCamelCase_ ) shuffle(lowerCamelCase_ ) return "".join(lowerCamelCase_ ) # random is a generalised function for letters, characters and numbers def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :int ): '''simple docstring''' return "".join(secrets.choice(lowerCamelCase_ ) for _ in range(lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any] ): '''simple docstring''' pass # Put your code here... def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :int ): '''simple docstring''' pass # Put your code here... def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :int ): '''simple docstring''' pass # Put your code here... def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :int = 8 ): '''simple docstring''' if len(lowerCamelCase_ ) < min_length: # Your Password must be at least 8 characters long return False snake_case_ : List[str] = any(char in ascii_uppercase for char in password ) snake_case_ : List[str] = any(char in ascii_lowercase for char in password ) snake_case_ : str = any(char in digits for char in password ) snake_case_ : List[Any] = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowercase # numbers, and special characters def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : str = int(input("""Please indicate the max length of your password: """ ).strip() ) snake_case_ : List[str] = input( """Please indicate the characters that must be in your password: """ ).strip() print("""Password generated:""" , password_generator(lowerCamelCase_ ) ) print( """Alternative Password generated:""" , alternative_password_generator(lowerCamelCase_ , lowerCamelCase_ ) , ) print("""[If you are thinking of using this password, you had better save it.]""" ) if __name__ == "__main__": main()
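Only the first generator above is fully implemented; its core is `secrets.choice`, which draws from the OS CSPRNG rather than the `random` module. A minimal generate-then-validate sketch using the same stdlib pieces:

# Generate a password with secrets.choice and validate it with the same
# strength predicate as above (length plus all four character classes).
import secrets
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation

pool = ascii_letters + digits + punctuation
candidate = "".join(secrets.choice(pool) for _ in range(12))
is_strong = (
    len(candidate) >= 8
    and any(c in ascii_uppercase for c in candidate)
    and any(c in ascii_lowercase for c in candidate)
    and any(c in digits for c in candidate)
    and any(c in punctuation for c in candidate)
)
print(candidate, "strong" if is_strong else "weak")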
8
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Optional[Any] = { 'facebook/blenderbot_small-90M': 512, } class __UpperCamelCase ( lowercase__ ): lowercase : str = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = BlenderbotSmallTokenizer def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,): super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = add_prefix_space def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ): snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : int = [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
8
1
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' if index == r: for j in range(lowerCamelCase_ ): print(data[j] , end=""" """ ) print(""" """ ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location snake_case_ : List[str] = arr[i] combination_util(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , index + 1 , lowerCamelCase_ , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Dict ): '''simple docstring''' # A temporary array to store all combination one by one snake_case_ : List[str] = [0] * r # Print all combination using temporary array 'data[]' combination_util(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , 0 , lowerCamelCase_ , 0 ) if __name__ == "__main__": # Driver code to check the function above __A : List[Any] = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
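The recursive include/exclude enumeration above produces exactly the r-subsets that `itertools.combinations` yields, in the same input order, which makes for an easy cross-check:

# Cross-check against the standard library's combination generator.
from itertools import combinations

subsets = list(combinations([10, 20, 30, 40, 50], 3))
assert len(subsets) == 10          # C(5, 3)
assert subsets[0] == (10, 20, 30)  # include/exclude order matches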
8
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :list ): '''simple docstring''' if len(lowerCamelCase_ ) <= 1: return lst snake_case_ : Union[str, Any] = 1 while i < len(lowerCamelCase_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case_ , snake_case_ : Union[str, Any] = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case_ : int = 1 return lst if __name__ == "__main__": __A : Optional[int] = input('Enter numbers separated by a comma:\n').strip() __A : int = [int(item) for item in user_input.split(',')] print(gnome_sort(unsorted))
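Gnome sort keeps the prefix before the cursor sorted, stepping back one position after every swap; like insertion sort it is O(n²) in the worst case. A compact restatement with a sanity check (names are illustrative):

# Compact gnome sort with the i == 0 reset folded into the comparison.
def gnome_sorted(data):
    lst = list(data)
    i = 1
    while i < len(lst):
        if i == 0 or lst[i - 1] <= lst[i]:
            i += 1  # prefix still sorted, advance
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1  # step back and re-check the swapped pair
    return lst

assert gnome_sorted([5, 3, 8, 1]) == [1, 3, 5, 8]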
8
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A : Any = { 'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'], 'tokenization_ctrl': ['CTRLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST', 'CTRLForSequenceClassification', 'CTRLLMHeadModel', 'CTRLModel', 'CTRLPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFCTRLForSequenceClassification', 'TFCTRLLMHeadModel', 'TFCTRLModel', 'TFCTRLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape 
,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
8
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A : Optional[int] = { 'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = ['BloomTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ 'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST', 'BloomForCausalLM', 'BloomModel', 'BloomPreTrainedModel', 'BloomForSequenceClassification', 'BloomForTokenClassification', 'BloomForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : int = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
1
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('>=', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType __A : str = get_logger(__name__) def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any]=0 ): '''simple docstring''' os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) with FSDP.state_dict_type( lowerCamelCase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : Dict = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : str = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' snake_case_ : Dict = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(lowerCamelCase_ , lowerCamelCase_ ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Union[str, Any] = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) snake_case_ : str = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(lowerCamelCase_ , lowerCamelCase_ ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : List[str] = os.path.join(lowerCamelCase_ , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) logger.info(F'''Saving model to {ckpt_dir}''' ) snake_case_ : Optional[Any] = {"""model""": state_dict} dist_cp.save_state_dict( state_dict=lowerCamelCase_ , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase_ ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any]=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( lowerCamelCase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(lowerCamelCase_ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """ """initializing FSDP object""" ) return snake_case_ : Optional[int] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' snake_case_ : int = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) logger.info(F'''Loading model from {input_model_file}''' ) snake_case_ : List[str] = 
torch.load(lowerCamelCase_ ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Optional[Any] = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) snake_case_ : List[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) logger.info(F'''Loading model from {input_model_file}''' ) snake_case_ : int = torch.load(lowerCamelCase_ ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Union[str, Any] = ( os.path.join(lowerCamelCase_ , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) snake_case_ : Optional[int] = {"""model""": model.state_dict()} dist_cp.load_state_dict( state_dict=lowerCamelCase_ , storage_reader=dist_cp.FileSystemReader(lowerCamelCase_ ) , planner=DefaultLoadPlanner() , ) snake_case_ : str = state_dict["""model"""] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any]=0 ): '''simple docstring''' os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) with FSDP.state_dict_type( lowerCamelCase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : List[str] = FSDP.optim_state_dict(lowerCamelCase_ , lowerCamelCase_ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ : int = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) snake_case_ : str = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(lowerCamelCase_ , lowerCamelCase_ ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: snake_case_ : Dict = os.path.join(lowerCamelCase_ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase_ ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Dict=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( lowerCamelCase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[Any] = None # below check should work but currently it isn't working (mostly a PyTorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ : Optional[Any] = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) snake_case_ : Any = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) 
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) snake_case_ : Union[str, Any] = torch.load(lowerCamelCase_ ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: snake_case_ : Dict = ( os.path.join(lowerCamelCase_ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) snake_case_ : List[str] = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(lowerCamelCase_ ) , ) snake_case_ : str = optim_state["""optimizer"""] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) snake_case_ : str = FSDP.optim_state_dict_to_load(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) optimizer.load_state_dict(lowerCamelCase_ )
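The helpers above wrap `FSDP.state_dict_type`, the PyTorch context manager that decides whether `state_dict()` returns full, local, or sharded tensors. A minimal sketch of the FULL_STATE_DICT save path on the raw API — `model` is assumed to be an already-wrapped module in an initialized process group, so this is not runnable on its own:

# Sketch of the FULL_STATE_DICT branch on the raw PyTorch FSDP API.
# Assumes torch.distributed is initialized and `model` is FSDP-wrapped.
import torch
import torch.distributed as dist
from torch.distributed.fsdp.fully_sharded_data_parallel import (
    FullStateDictConfig,
    FullyShardedDataParallel as FSDP,
    StateDictType,
)

cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg):
    state_dict = model.state_dict()  # gathered to CPU on rank 0 only
if dist.get_rank() == 0:
    torch.save(state_dict, "model.bin")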
8
'''simple docstring''' import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __A : Optional[int] = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[str] ,*_UpperCamelCase :str ,**_UpperCamelCase :Optional[int] ): warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" ,_UpperCamelCase ,) super().__init__(*_UpperCamelCase ,**_UpperCamelCase )
8
1
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' return x + 2 class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :Optional[Any] ): snake_case_ : Union[str, Any] = """x = 3""" snake_case_ : str = {} snake_case_ : Optional[int] = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase ,{"""x""": 3} ) snake_case_ : List[Any] = """x = y""" snake_case_ : Optional[Any] = {"""y""": 5} snake_case_ : Tuple = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase ,{"""x""": 5, """y""": 5} ) def a__ ( self :Union[str, Any] ): snake_case_ : Union[str, Any] = """y = add_two(x)""" snake_case_ : Tuple = {"""x""": 3} snake_case_ : List[str] = evaluate(_UpperCamelCase ,{"""add_two""": add_two} ,state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: snake_case_ : List[Any] = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) assert result is None assert "tried to execute add_two" in out.out def a__ ( self :Optional[int] ): snake_case_ : Union[str, Any] = """x = 3""" snake_case_ : Optional[int] = {} snake_case_ : List[Any] = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase ,{"""x""": 3} ) def a__ ( self :Optional[int] ): snake_case_ : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}""" snake_case_ : Dict = {"""x""": 3} snake_case_ : Tuple = evaluate(_UpperCamelCase ,{"""add_two""": add_two} ,state=_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """y""": 5} ) self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def a__ ( self :Optional[Any] ): snake_case_ : Any = """x = 3\ny = 5""" snake_case_ : Tuple = {} snake_case_ : List[Any] = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """y""": 5} ) def a__ ( self :Optional[Any] ): snake_case_ : Dict = """text = f'This is x: {x}.'""" snake_case_ : int = {"""x""": 3} snake_case_ : Dict = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """text""": """This is x: 3."""} ) def a__ ( self :List[Any] ): snake_case_ : Optional[int] = """if x <= 3:\n y = 2\nelse:\n y = 5""" snake_case_ : int = {"""x""": 3} snake_case_ : Optional[int] = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """y""": 2} ) snake_case_ : Dict = {"""x""": 8} snake_case_ : Any = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCamelCase ,{"""x""": 8, """y""": 5} ) def a__ ( self :Dict ): snake_case_ : Union[str, Any] = """test_list = [x, add_two(x)]""" snake_case_ : Tuple = {"""x""": 3} snake_case_ : Any = evaluate(_UpperCamelCase ,{"""add_two""": add_two} ,state=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase ,[3, 5] ) self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """test_list""": [3, 5]} ) def a__ ( self :Any ): snake_case_ : int = """y = x""" snake_case_ : int = {"""x""": 3} snake_case_ : Optional[int] = evaluate(_UpperCamelCase ,{} ,state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """y""": 3} ) def a__ ( self :Any ): snake_case_ : str = """test_list = [x, add_two(x)]\ntest_list[1]""" snake_case_ : Tuple = {"""x""": 3} snake_case_ : Union[str, Any] = evaluate(_UpperCamelCase ,{"""add_two""": add_two} ,state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """test_list""": [3, 5]} ) snake_case_ : List[Any] = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" snake_case_ : Tuple = {"""x""": 3} snake_case_ : str = evaluate(_UpperCamelCase ,{"""add_two""": add_two} ,state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase ,{"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def a__ ( self :Dict ): snake_case_ : List[Any] = """x = 0\nfor i in range(3):\n x = i""" snake_case_ : Dict = {} snake_case_ : Dict = evaluate(_UpperCamelCase ,{"""range""": range} ,state=_UpperCamelCase ) assert result == 2 self.assertDictEqual(_UpperCamelCase ,{"""x""": 2, """i""": 2} )
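What these tests pin down is that `evaluate` walks the AST instead of calling `eval`, so only whitelisted tools and state mutations are possible. A toy illustration of the underlying mechanism for a single literal assignment (hypothetical helper, not the library code):

# Toy AST-walking evaluator for one literal assignment, illustrating the
# mechanism the tests above exercise; the real code handles many more node types.
import ast

def eval_assignment(source: str, state: dict):
    node = ast.parse(source).body[0]
    assert isinstance(node, ast.Assign)
    value = ast.literal_eval(node.value)  # literal right-hand sides only
    state[node.targets[0].id] = value
    return value

state = {}
assert eval_assignment("x = 3", state) == 3
assert state == {"x": 3}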
8
'''simple docstring''' import re def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[Any] = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ) if __name__ == "__main__": __A : int = '0094702343221' print(is_sri_lankan_phone_number(phone))
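Reading the pattern above left to right: an optional trunk prefix (0, 94, +94, or 0094), a mobile code 7x with x in {0, 1, 2, 4, 5, 6, 7, 8}, an optional single separator, then seven digits. A few spot checks:

# Spot checks for the Sri Lankan mobile-number pattern above.
import re

pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")
assert pattern.search("0094702343221")          # 0094 prefix, no separator
assert pattern.search("+9477-1234567")          # +94 prefix, dash separator
assert pattern.search("+94 771234567") is None  # separator in the wrong place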
8
1
'''simple docstring''' __A : Tuple = 65_521 def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[Any] = 1 snake_case_ : List[Any] = 0 for plain_chr in plain_text: snake_case_ : Union[str, Any] = (a + ord(lowerCamelCase_ )) % MOD_ADLER snake_case_ : Any = (b + a) % MOD_ADLER return (b << 16) | a
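The two running sums above are the textbook Adler-32 checksum (a over the bytes, b over the prefix sums of a, both mod 65521); for ASCII input it agrees with zlib's C implementation, which makes a one-line verification possible:

# Cross-check the pure-Python Adler-32 above against zlib for ASCII input.
import zlib

def adler32_str(text: str, mod: int = 65_521) -> int:
    a, b = 1, 0
    for ch in text:
        a = (a + ord(ch)) % mod
        b = (b + a) % mod
    return (b << 16) | a

assert adler32_str("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398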
8
'''simple docstring''' from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class __UpperCamelCase ( lowercase__ ): lowercase : Union[List[PIL.Image.Image], np.ndarray] lowercase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
8
1
'''simple docstring''' import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __A : List[Any] = logging.get_logger(__name__) def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[Any] = set() snake_case_ : Optional[Any] = [] def parse_line(lowerCamelCase_ :Union[str, Any] ): for line in fp: if isinstance(lowerCamelCase_ , lowerCamelCase_ ): snake_case_ : Union[str, Any] = line.decode("""UTF-8""" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(""" """ ): # process a single warning and move it to `selected_warnings`. if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(lowerCamelCase_ ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(lowerCamelCase_ ) buffer.clear() continue else: snake_case_ : Any = line.strip() buffer.append(lowerCamelCase_ ) if from_gh: for filename in os.listdir(lowerCamelCase_ ): snake_case_ : Any = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) if not os.path.isdir(lowerCamelCase_ ): # read the file if filename != "warnings.txt": continue with open(lowerCamelCase_ ) as fp: parse_line(lowerCamelCase_ ) else: try: with zipfile.ZipFile(lowerCamelCase_ ) as z: for filename in z.namelist(): if not os.path.isdir(lowerCamelCase_ ): # read the file if filename != "warnings.txt": continue with z.open(lowerCamelCase_ ) as fp: parse_line(lowerCamelCase_ ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[Any] = set() snake_case_ : Tuple = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for p in os.listdir(lowerCamelCase_ ) if (p.endswith(""".zip""" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase_ , lowerCamelCase_ ) ) return selected_warnings if __name__ == "__main__": def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): '''simple docstring''' return values.split(""",""" ) __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') # optional parameters parser.add_argument( '--targets', default='DeprecationWarning,UserWarning,FutureWarning', type=list_str, help='Comma-separated list of target warning(s) which we want to extract.', ) parser.add_argument( '--from_gh', action='store_true', help='If running from a GitHub action workflow and collecting warnings from its artifacts.', ) __A : Union[str, Any] = parser.parse_args() __A : Tuple = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __A : Dict = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, 
ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('=' * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __A : int = extract_warnings(args.output_dir, args.targets) __A : Dict = sorted(selected_warnings) with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
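The filter that decides whether a captured block is kept is just the substring test `": {target}: "`, matching pytest's `path:line: Category: message` layout; the sample line below is illustrative:

# Illustrative check of the warning-target filter used above.
targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
warning = "tests/test_x.py:12: DeprecationWarning: this API is deprecated"
assert any(f": {x}: " in warning for x in targets)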
8
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): lowercase : Dict = StableDiffusionInpaintPipeline lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase : Optional[int] = frozenset([] ) def a__ ( self :Any ): torch.manual_seed(0 ) snake_case_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,) snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) torch.manual_seed(0 ) snake_case_ : List[str] = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) snake_case_ : Optional[int] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,) snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase ) snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0] snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) ) snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) ) if str(_UpperCamelCase ).startswith("""mps""" ): snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase ) else: snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ : int = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": 
mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def a__ ( self :Any ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Optional[Any] = self.get_dummy_components() snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase ) snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ ( self :Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self :Tuple ): snake_case_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Dict = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : Union[str, Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a__ ( self :Tuple ): snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[Any] = torch.manual_seed(0 ) snake_case_ : Any = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase 
,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : List[str] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a__ ( self :Union[str, Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" ) snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : Tuple = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,) snake_case_ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
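For orientation, a minimal, hedged usage sketch of the inpainting pipeline these tests exercise. The checkpoint and image URLs are the ones used in the slow tests above; running this needs a CUDA device and network access.

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

# Inputs taken from the slow tests above: a source image and a mask whose
# white region marks the pixels to repaint.
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
image.save("inpainted.png")  # output file name is arbitrary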
8
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __A : Any = 256_047 __A : List[str] = 256_145 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Union[str, Any] = NllbTokenizer lowercase : Optional[Any] = NllbTokenizerFast lowercase : Tuple = True lowercase : Optional[int] = True lowercase : Union[str, Any] = {} def a__ ( self :Tuple ): super().setUp() # We have a SentencePiece fixture for testing snake_case_ : List[str] = NllbTokenizer(_UpperCamelCase ,keep_accents=_UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self :Dict ): snake_case_ : Dict = NllbTokenizer(_UpperCamelCase ,keep_accents=_UpperCamelCase ) snake_case_ : List[str] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_UpperCamelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,) snake_case_ : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _UpperCamelCase ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase ,[ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] ,) snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def a__ ( self :int ): snake_case_ : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : List[str] = self.tokenizer_class.from_pretrained(_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = tempfile.mkdtemp() snake_case_ : Tuple = tokenizer_r.save_pretrained(_UpperCamelCase ) snake_case_ : str = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one 
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) snake_case_ : str = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(_UpperCamelCase ,_UpperCamelCase ) # Checks everything loads correctly in the same way snake_case_ : Dict = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ : int = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase ,_UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) # Save tokenizer rust, legacy_format=True snake_case_ : List[Any] = tempfile.mkdtemp() snake_case_ : Optional[Any] = tokenizer_r.save_pretrained(_UpperCamelCase ,legacy_format=_UpperCamelCase ) snake_case_ : Union[str, Any] = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it save with the same files self.assertSequenceEqual(_UpperCamelCase ,_UpperCamelCase ) # Checks everything loads correctly in the same way snake_case_ : Optional[int] = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ : int = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase ,_UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) # Save tokenizer rust, legacy_format=False snake_case_ : List[str] = tempfile.mkdtemp() snake_case_ : List[str] = tokenizer_r.save_pretrained(_UpperCamelCase ,legacy_format=_UpperCamelCase ) snake_case_ : Union[str, Any] = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case_ : Union[str, Any] = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ : str = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase ,_UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) @require_torch def a__ ( self :List[str] ): if not self.test_seqaseq: return snake_case_ : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
snake_case_ : int = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] snake_case_ : Union[str, Any] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: snake_case_ : Any = tokenizer.prepare_seqaseq_batch( src_texts=_UpperCamelCase ,tgt_texts=_UpperCamelCase ,max_length=3 ,max_target_length=1_0 ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""ron_Latn""" ,) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,1_0 ) # max_target_length will default to max_length if not specified snake_case_ : Tuple = tokenizer.prepare_seqaseq_batch( _UpperCamelCase ,tgt_texts=_UpperCamelCase ,max_length=3 ,return_tensors="""pt""" ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,3 ) snake_case_ : str = tokenizer.prepare_seqaseq_batch( src_texts=_UpperCamelCase ,max_length=3 ,max_target_length=1_0 ,return_tensors="""pt""" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 ) self.assertNotIn("""decoder_input_ids""" ,_UpperCamelCase ) @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" ) def a__ ( self :List[Any] ): pass def a__ ( self :Any ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : Dict = [AddedToken("""<special>""" ,lstrip=_UpperCamelCase )] snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( _UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Any = tokenizer_r.encode("""Hey this is a <special> token""" ) snake_case_ : int = tokenizer_r.encode("""<special>""" ,add_special_tokens=_UpperCamelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: snake_case_ : Optional[int] = self.rust_tokenizer_class.from_pretrained( _UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Union[str, Any] = self.tokenizer_class.from_pretrained( _UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Tuple = tokenizer_p.encode("""Hey this is a <special> token""" ) snake_case_ : Dict = tokenizer_cr.encode("""Hey this is a <special> token""" ) self.assertEqual(_UpperCamelCase ,_UpperCamelCase ) self.assertEqual(_UpperCamelCase ,_UpperCamelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): lowercase : str = 'facebook/nllb-200-distilled-600M' lowercase : Any = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly 
five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase : Optional[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase : Any = [ 2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 8_1_6_5, 2_4_8_0_6_6, 1_4_7_3_4, 9_5_0, 1_1_3_5, 1_0_5_7_2_1, 3_5_7_3, 8_3, 2_7_3_5_2, 1_0_8, 4_9_4_8_6, 2, ] @classmethod def a__ ( cls :Union[str, Any] ): snake_case_ : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name ,src_lang="""eng_Latn""" ,tgt_lang="""ron_Latn""" ) snake_case_ : Optional[Any] = 1 return cls def a__ ( self :List[Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] ,2_5_6_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] ,2_5_6_0_0_2 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] ,2_5_6_0_5_7 ) def a__ ( self :int ): snake_case_ : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,_UpperCamelCase ) def a__ ( self :List[str] ): self.assertIn(_UpperCamelCase ,self.tokenizer.all_special_ids ) # fmt: off snake_case_ : int = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7] # fmt: on snake_case_ : List[Any] = self.tokenizer.decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ) snake_case_ : Tuple = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_UpperCamelCase ) self.assertEqual(_UpperCamelCase ,_UpperCamelCase ) self.assertNotIn(self.tokenizer.eos_token ,_UpperCamelCase ) def a__ ( self :Any ): snake_case_ : Tuple = ["""this is gunna be a long sentence """ * 2_0] assert isinstance(src_text[0] ,_UpperCamelCase ) snake_case_ : List[str] = 1_0 snake_case_ : int = self.tokenizer(_UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ).input_ids[0] self.assertEqual(ids[-1] ,2 ) self.assertEqual(ids[0] ,_UpperCamelCase ) self.assertEqual(len(_UpperCamelCase ) ,_UpperCamelCase ) def a__ ( self :List[Any] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[2_5_6_2_0_3, 3] ) def a__ ( self :Optional[Any] ): snake_case_ : Tuple = tempfile.mkdtemp() snake_case_ : Any = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_UpperCamelCase ) snake_case_ : str = NllbTokenizer.from_pretrained(_UpperCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,_UpperCamelCase ) @require_torch def a__ ( self :Optional[int] ): snake_case_ : Optional[Any] = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=_UpperCamelCase ,truncation=_UpperCamelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,) snake_case_ : List[Any] = shift_tokens_right( batch["""labels"""] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id["""ron_Latn"""] ) self.assertIsInstance(_UpperCamelCase ,_UpperCamelCase ) self.assertEqual((2, 1_5) ,batch.input_ids.shape ) self.assertEqual((2, 1_5) ,batch.attention_mask.shape ) snake_case_ : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,_UpperCamelCase ) 
self.assertEqual(_UpperCamelCase ,batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) def a__ ( self :int ): snake_case_ : Optional[int] = self.tokenizer(self.src_text ,padding=_UpperCamelCase ,truncation=_UpperCamelCase ,max_length=3 ,return_tensors="""pt""" ) snake_case_ : List[Any] = self.tokenizer( text_target=self.tgt_text ,padding=_UpperCamelCase ,truncation=_UpperCamelCase ,max_length=1_0 ,return_tensors="""pt""" ) snake_case_ : Optional[Any] = targets["""input_ids"""] snake_case_ : Optional[Any] = shift_tokens_right( _UpperCamelCase ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,1_0 ) @require_torch def a__ ( self :Optional[int] ): snake_case_ : int = self.tokenizer._build_translation_inputs( """A test""" ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( nested_simplify(_UpperCamelCase ) ,{ # A, test, EOS, en_XX """input_ids""": [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 2_5_6_0_5_7, } ,) @require_torch def a__ ( self :Optional[int] ): snake_case_ : Optional[Any] = True snake_case_ : Optional[Any] = self.tokenizer( """UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids ,[1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] ) snake_case_ : int = False snake_case_ : int = self.tokenizer( """UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids ,[2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
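A compact, hedged sketch of the encoding behaviour these integration tests pin down: source and target texts are tokenized with their language codes, and by default the source language code is prepended while </s> is appended (the legacy behaviour checked last places the code at the end instead). Requires network access for the checkpoint.

from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)

batch = tokenizer(
    ["UN Chief says there is no military solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    return_tensors="pt",
)
# input_ids start with the eng_Latn code (256047) and end with </s> (2);
# labels carry the ron_Latn-coded target for seq2seq training.
print(batch.input_ids)
print(batch.labels)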
8
'''simple docstring''' import collections import os import re from pathlib import Path __A : Dict = 'src/transformers' # Matches is_xxx_available() __A : Dict = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __A : int = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __A : List[Any] = re.compile(r'^\s*try:') # Catches a line with else: __A : Any = re.compile(r'^\s*else:') def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase_ ) is None: return None snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )] backends.sort() return "_and_".join(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : List[Any] = 0 while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : Union[str, Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase_ ): snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0] snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ ) if single_line_import_search is not None: snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : Union[str, Any] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : List[Any] = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None: snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_between_brackets.search(lowerCamelCase_ ) is not None: snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_quote_object.search(lowerCamelCase_ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : List[Any] = [] while ( line_index < len(lowerCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : Union[str, Any] = lines[line_index] snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
snake_case_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : Dict = lines[line_index] snake_case_ : Any = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ): '''simple docstring''' def find_duplicates(lowerCamelCase_ :Union[str, Any] ): return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Optional[int] = [] for key in import_dict_objects.keys(): snake_case_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = [] for root, _, files in os.walk(lowerCamelCase_ ): if "__init__.py" in files: snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" ) snake_case_ : Dict = parse_init(lowerCamelCase_ ) if objects is not None: snake_case_ : Any = analyze_results(*lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCamelCase_ ) ) if len(lowerCamelCase_ ) > 0: raise ValueError("""\n\n""".join(lowerCamelCase_ ) ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = [] for path, directories, files in os.walk(lowerCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCamelCase_ ) for fname in files: if fname == "__init__.py": continue snake_case_ : Dict = 
str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCamelCase_ ) return submodules __A : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def UpperCAmelCase ( ): '''simple docstring''' # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and # (potentially re-) add them. with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f: snake_case_ : str = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) ) snake_case_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
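The parser above expects inits of a particular shape: a `_import_structure` dict guarded per backend, mirrored by the same imports under `TYPE_CHECKING`. A minimal illustration of the layout it validates (module and object names here are invented for the example):

# Minimal init layout accepted by parse_init/analyze_results: the keys of
# _import_structure and the TYPE_CHECKING imports must list the same objects.
from typing import TYPE_CHECKING

from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

_import_structure = {"configuration_foo": ["FooConfig"]}

try:
    if not is_torch_available():  # backend guard matched by the regexes above
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_foo"] = ["FooModel"]

if TYPE_CHECKING:
    from .configuration_foo import FooConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_foo import FooModel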
8
1
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets __A : int = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' __A : Optional[Any] = '\\nMean Squared Error (MSE) is the average of the squared differences between the predicted\nand actual values.\n' __A : List[Any] = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows:\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. 
])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def a__ ( self :Any ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html""" ] ,) def a__ ( self :Optional[Any] ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("""float""" ) ), "references": datasets.Sequence(datasets.Value("""float""" ) ), } else: return { "predictions": datasets.Value("""float""" ), "references": datasets.Value("""float""" ), } def a__ ( self :List[str] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Any="uniform_average" ,_UpperCamelCase :int=True ): snake_case_ : int = mean_squared_error( _UpperCamelCase ,_UpperCamelCase ,sample_weight=_UpperCamelCase ,multioutput=_UpperCamelCase ,squared=_UpperCamelCase ) return {"mse": mse}
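The metric delegates to scikit-learn, so the computation reduces to a one-liner; a plain-NumPy restatement of the docstring example for reference:

import numpy as np

predictions = np.array([2.5, 0.0, 2, 8])
references = np.array([3, -0.5, 2, 7])

# MSE is the mean of squared residuals; RMSE (squared=False) is its root.
mse = np.mean((predictions - references) ** 2)
rmse = np.sqrt(mse)
print(mse)   # 0.375, as in the docstring example
print(rmse)  # 0.6123724356957945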
8
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Any=1_8 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :List[str]=4_0_0 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=True ,): snake_case_ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case_ : Union[str, Any] = parent snake_case_ : str = batch_size snake_case_ : List[Any] = num_channels snake_case_ : Tuple = image_size snake_case_ : int = min_resolution snake_case_ : int = max_resolution snake_case_ : Union[str, Any] = do_resize snake_case_ : Optional[Any] = size snake_case_ : Any = apply_ocr def a__ ( self :Union[str, Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None def a__ ( self :List[Any] ): snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self ) @property def a__ ( self :int ): return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self :Any ): snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) ) def a__ ( self :int ): snake_case_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} ) snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ) self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} ) def a__ ( self :Optional[Any] ): pass def a__ ( self :Union[str, Any] ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,Image.Image ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) self.assertIsInstance(encoding.words ,_UpperCamelCase ) self.assertIsInstance(encoding.boxes ,_UpperCamelCase ) # Test batched snake_case_ : List[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], 
self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Tuple ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,np.ndarray ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Any = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Optional[Any] ): # Initialize image_processing snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Union[str, Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :List[Any] ): # with apply_OCR = True snake_case_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" ) snake_case_ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) snake_case_ : Dict = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", 
"""A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 
3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words ,_UpperCamelCase ) self.assertListEqual(encoding.boxes ,_UpperCamelCase ) # with apply_OCR = False snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase ) snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
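For context, a short usage sketch of this image processor; upstream the class is spelled `LayoutLMv3ImageProcessor`, and `document.png` is a placeholder path. With `apply_ocr=True`, pytesseract and the Tesseract binary must be installed.

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")  # placeholder input file

encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # (1, 3, 224, 224) after resizing
print(encoding.words[0][:5])        # first OCR'd words
print(encoding.boxes[0][:5])        # matching boxes on a 0-1000 grid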
8
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Union[str, Any] = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = ['PLBartTokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[Any] = [ 'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST', 'PLBartForCausalLM', 'PLBartForConditionalGeneration', 'PLBartForSequenceClassification', 'PLBartModel', 'PLBartPreTrainedModel', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys __A : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
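As orientation for the objects this init exposes, a hedged usage sketch; the checkpoint name `uclanlp/plbart-base` is the commonly published one and is an assumption, not something stated in this file.

from transformers import PLBartForConditionalGeneration, PLBartTokenizer

# Checkpoint name is assumed (public PLBart base model), not taken from this init.
tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-base")
model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")

inputs = tokenizer("def add(a, b): return a + b", return_tensors="pt")
outputs = model.generate(**inputs, max_length=32)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))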
8
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : List[Any] = generate_pascal_triangle(lowerCamelCase_ ) for row_idx in range(lowerCamelCase_ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [] for current_row_idx in range(lowerCamelCase_ ): snake_case_ : List[str] = populate_current_row(lowerCamelCase_ , lowerCamelCase_ ) triangle.append(lowerCamelCase_ ) return triangle def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Union[str, Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 snake_case_ , snake_case_ : Optional[Any] = 1, 1 for current_col_idx in range(1 , lowerCamelCase_ ): calculate_current_element( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return current_row def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :list[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ): '''simple docstring''' snake_case_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1] snake_case_ : List[Any] = triangle[current_row_idx - 1][current_col_idx] snake_case_ : Optional[int] = above_to_left_elt + above_to_right_elt def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [[1]] for row_index in range(1 , lowerCamelCase_ ): snake_case_ : Optional[Any] = [0] + result[-1] + [0] snake_case_ : Dict = row_index + 1 # Calculate the number of distinct elements in a row snake_case_ : Any = sum(divmod(lowerCamelCase_ , 2 ) ) snake_case_ : Tuple = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] snake_case_ : Optional[int] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() snake_case_ : str = row_first_half + row_second_half result.append(lowerCamelCase_ ) return result def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCamelCase_ :Callable , lowerCamelCase_ :int ) -> None: snake_case_ : Dict = F'''{func.__name__}({value})''' snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowerCamelCase_ , lowerCamelCase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
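Row n of the triangle is the binomial coefficients C(n, 0) through C(n, n), which gives an easy cross-check for both generators (run in the same module so the functions are in scope):

from math import comb

n = 6
expected = [[comb(r, k) for k in range(r + 1)] for r in range(n)]
assert generate_pascal_triangle(n) == expected
assert generate_pascal_triangle_optimized(n) == expected
print(expected[-1])  # [1, 5, 10, 10, 5, 1]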
8
1
'''simple docstring''' from __future__ import annotations from math import gcd def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int = 2 , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 3 , ): '''simple docstring''' # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int: return (pow(lowerCamelCase_ , 2 ) + step) % modulus for _ in range(lowerCamelCase_ ): # These track the position within the cycle detection logic. snake_case_ : str = seed snake_case_ : Optional[Any] = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. snake_case_ : Tuple = rand_fn(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) snake_case_ : List[str] = rand_fn(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) snake_case_ : str = rand_fn(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. snake_case_ : Union[str, Any] = gcd(hare - tortoise , lowerCamelCase_ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. snake_case_ : Dict = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __A : Optional[int] = argparse.ArgumentParser() parser.add_argument( 'num', type=int, help='The value to find a divisor of', ) parser.add_argument( '--attempts', type=int, default=3, help='The number of attempts before giving up', ) __A : Any = parser.parse_args() __A : List[Any] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F'{args.num} is probably prime') else: __A : List[str] = args.num // divisor print(F'{args.num} = {divisor} * {quotient}')
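A quick sanity check of the factoring behaviour; success is probabilistic in the seed and step (hence the retry loop above), but small semiprimes factor reliably with the defaults:

n = 101 * 103  # 10403, a small semiprime
divisor = pollard_rho(n)
assert divisor is not None and n % divisor == 0 and 1 < divisor < n

# For a prime input every attempt ends with divisor == num, so None is returned.
assert pollard_rho(97, attempts=5) is None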
8
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Dict ): snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Dict = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) ) @slow def a__ ( self :Union[str, Any] ): snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Any = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
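A hedged sketch of the typical inference path these expected-value tests guard: encode a sentence, run the encoder, and mean-pool the last hidden state into a sentence vector. Requires network access for the checkpoint.

import torch
from transformers import AutoTokenizer, XLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
model.eval()

inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs).last_hidden_state  # (1, seq_len, 768)

# Mean-pool over non-padding tokens to get one vector per sentence.
mask = inputs.attention_mask.unsqueeze(-1)
embedding = (last_hidden * mask).sum(dim=1) / mask.sum(dim=1)
print(embedding.shape)  # torch.Size([1, 768])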
8
1
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Calculates pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Precision of all Decimal arithmetic below; each series term adds
    # roughly 14 correct digits, hence the iteration count.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
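# --- usage sketch (added) ---
# A quick sanity check: the leading digits returned by ``pi`` should match
# the known expansion 3.14159265358979...
digits = pi(30)
assert digits.startswith("3.14159265358979")
print(digits)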
8
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
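# --- usage sketch (added) ---
# For f(x) = x**2 on [0, 3] the exact integral is 9; the trapezoidal
# approximation converges toward it as ``steps`` grows (error ~ 1/steps**2).
approx = trapezoidal_area(lambda x: x**2, 0, 3, steps=1_000)
assert abs(approx - 9.0) < 1e-3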
8
1
'''simple docstring''' import collections import os import re from pathlib import Path __A : Dict = 'src/transformers' # Matches is_xxx_available() __A : Dict = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __A : int = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __A : List[Any] = re.compile(r'^\s*try:') # Catches a line with else: __A : Any = re.compile(r'^\s*else:') def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase_ ) is None: return None snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )] backends.sort() return "_and_".join(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : List[Any] = 0 while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : Union[str, Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase_ ): snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0] snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ ) if single_line_import_search is not None: snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : Union[str, Any] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : List[Any] = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None: snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_between_brackets.search(lowerCamelCase_ ) is not None: snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_quote_object.search(lowerCamelCase_ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : List[Any] = [] while ( line_index < len(lowerCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : Union[str, Any] = lines[line_index] snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
snake_case_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : Dict = lines[line_index] snake_case_ : Any = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ): '''simple docstring''' def find_duplicates(lowerCamelCase_ :Union[str, Any] ): return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Optional[int] = [] for key in import_dict_objects.keys(): snake_case_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = [] for root, _, files in os.walk(lowerCamelCase_ ): if "__init__.py" in files: snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" ) snake_case_ : Dict = parse_init(lowerCamelCase_ ) if objects is not None: snake_case_ : Any = analyze_results(*lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCamelCase_ ) ) if len(lowerCamelCase_ ) > 0: raise ValueError("""\n\n""".join(lowerCamelCase_ ) ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = [] for path, directories, files in os.walk(lowerCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCamelCase_ ) for fname in files: if fname == "__init__.py": continue snake_case_ : Dict = 
str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCamelCase_ ) return submodules __A : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def UpperCAmelCase ( ): '''simple docstring''' # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f: snake_case_ : str = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) ) snake_case_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
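# --- illustration (standalone sketch of the backend-detection regexes used
# above; the sample line is hypothetical) ---
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

line = "    if not is_torch_available() and not is_flax_available():"
if _re_test_backend.search(line) is not None:
    backends = sorted(b[0] for b in _re_backend.findall(line))
    print("_and_".join(backends))  # -> flax_and_torch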
8
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) __A : int = logging.getLogger() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) snake_case_ : int = parser.parse_args() return args.f def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[Any] = {} snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" ) if os.path.exists(lowerCamelCase_ ): with open(lowerCamelCase_ , """r""" ) as f: snake_case_ : str = json.load(lowerCamelCase_ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda""" return is_using_cuda and is_apex_available() __A : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __UpperCamelCase ( lowercase__ ): @classmethod def a__ ( cls :Dict ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU snake_case_ : Optional[int] = tempfile.mkdtemp() snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" ) write_basic_config(save_location=cls.configPath ) snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def a__ ( cls :int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Optional[int] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : Dict = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2 snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : str = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertLess(result["""train_loss"""] ,0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) ) @unittest.skip(reason="""Fix me @muellerzr""" ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[str] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : Optional[int] = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 ) self.assertGreaterEqual(result["""eval_exact"""] ,2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Union[str, Any] = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Union[str, Any] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[Any] = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : int = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 ) self.assertGreaterEqual(result["""eval_rouge2"""] ,2 ) self.assertGreaterEqual(result["""eval_rougeL"""] ,7 ) self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : Tuple = self.get_auto_remove_tmp_dir() snake_case_ : Optional[Any] = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Any = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) ) @slow def a__ ( self :Optional[Any] ): snake_case_ : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCamelCase ) snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Any ): snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
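# --- illustration (standalone sketch; the metric names and values are
# hypothetical stand-ins) ---
# Each example script writes an ``all_results.json`` into its output dir;
# the assertions above just load that file and compare metrics against
# loose thresholds, e.g.:
import io
import json

result = json.load(io.StringIO('{"eval_accuracy": 0.82, "train_loss": 0.31}'))
assert result["eval_accuracy"] >= 0.75
assert result["train_loss"] < 0.5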
8
1
'''simple docstring''' import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __A : Optional[int] = 16 __A : Optional[Any] = 32 def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' return int(x / 2**20 ) class __UpperCamelCase : def __enter__( self :Optional[int] ): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero snake_case_ : Optional[Any] = torch.cuda.memory_allocated() return self def __exit__( self :List[Any] ,*_UpperCamelCase :Union[str, Any] ): gc.collect() torch.cuda.empty_cache() snake_case_ : int = torch.cuda.memory_allocated() snake_case_ : Optional[int] = torch.cuda.max_memory_allocated() snake_case_ : Dict = bamb(self.end - self.begin ) snake_case_ : List[str] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def UpperCAmelCase ( lowerCamelCase_ :Accelerator , lowerCamelCase_ :int = 16 , lowerCamelCase_ :str = "bert-base-cased" , lowerCamelCase_ :int = 3_20 , lowerCamelCase_ :int = 1_60 , ): '''simple docstring''' snake_case_ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) snake_case_ : str = load_dataset( """glue""" , """mrpc""" , split={"""train""": F'''train[:{n_train}]''', """validation""": F'''validation[:{n_val}]'''} ) def tokenize_function(lowerCamelCase_ :Any ): # max_length=None => use the model max length (it's actually the default) snake_case_ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case_ : Optional[Any] = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase_ :List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase_ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase_ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
snake_case_ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) snake_case_ : str = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) return train_dataloader, eval_dataloader def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Any ): '''simple docstring''' # Initialize accelerator snake_case_ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : Any = config["""lr"""] snake_case_ : List[Any] = int(config["""num_epochs"""] ) snake_case_ : str = int(config["""seed"""] ) snake_case_ : Union[str, Any] = int(config["""batch_size"""] ) snake_case_ : Optional[Any] = args.model_name_or_path set_seed(lowerCamelCase_ ) snake_case_ , snake_case_ : Union[str, Any] = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ ) # Instantiate optimizer snake_case_ : List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case_ : Any = optimizer_cls(params=model.parameters() , lr=lowerCamelCase_ ) if accelerator.state.deepspeed_plugin is not None: snake_case_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: snake_case_ : Optional[int] = 1 snake_case_ : Any = (len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case_ : List[Any] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ , num_warmup_steps=0 , num_training_steps=lowerCamelCase_ , ) else: snake_case_ : List[str] = DummyScheduler(lowerCamelCase_ , total_num_steps=lowerCamelCase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # We need to keep track of how many total steps we have iterated over snake_case_ : List[Any] = 0 # We also need to keep track of the stating epoch so files are named properly snake_case_ : List[str] = 0 # Now we train the model snake_case_ : Tuple = {} for epoch in range(lowerCamelCase_ , lowerCamelCase_ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(lowerCamelCase_ ): snake_case_ : Any = model(**lowerCamelCase_ ) snake_case_ : List[str] = outputs.loss snake_case_ : Union[str, Any] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) ) accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) ) accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) ) accelerator.print( """Total Peak Memory consumed during the train (max): {}""".format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) snake_case_ : Optional[Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowerCamelCase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase_ , ) parser.add_argument( """--output_dir""" , type=lowerCamelCase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--peak_memory_upper_bound""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , ) parser.add_argument( """--n_train""" , type=lowerCamelCase_ , default=3_20 , help="""Number of training examples to use.""" , ) parser.add_argument( """--n_val""" , type=lowerCamelCase_ , default=1_60 , help="""Number of validation examples to use.""" , ) parser.add_argument( """--num_epochs""" , type=lowerCamelCase_ , default=1 , help="""Number of train epochs.""" , ) snake_case_ : Any = parser.parse_args() snake_case_ : Optional[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
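# --- illustration (standalone sketch of the peak-memory bookkeeping above;
# requires a CUDA device, hence the guard) ---
import gc

import torch


def b2mb(x: int) -> int:
    # bytes -> whole megabytes (2**20 bytes)
    return int(x / 2**20)


if torch.cuda.is_available():
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
    begin = torch.cuda.memory_allocated()
    x = torch.randn(1024, 1024, device="cuda")  # allocate ~4 MB
    used = b2mb(torch.cuda.memory_allocated() - begin)
    peaked = b2mb(torch.cuda.max_memory_allocated() - begin)
    print(f"delta used/peak {used}/{peaked} MB")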
8
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A : Tuple = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : str = ['input_values', 'padding_mask'] def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,): super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = chunk_length_s snake_case_ : str = overlap @property def a__ ( self :Any ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def a__ ( self :List[str] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. 
Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs snake_case_ : Tuple = True snake_case_ : str = bool( isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ): snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa ) elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): snake_case_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_UpperCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) snake_case_ : Tuple = None snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio ) snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) ) snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: snake_case_ : Any = max(array.shape[0] for array in raw_audio ) snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) ) snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length snake_case_ : Union[str, Any] = """max_length""" else: snake_case_ : int = input_values # normal padding on batch if padded_inputs is None: snake_case_ : Optional[int] = self.pad( _UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,) if padding: snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" ) snake_case_ : Optional[int] = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: snake_case_ : Dict = example[..., None] input_values.append(example.T ) snake_case_ : List[Any] = input_values if return_tensors is not None: snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase ) return padded_inputs
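# --- illustration (standalone sketch of the chunking arithmetic above) ---
# ``chunk_length`` is the window size in samples and ``chunk_stride`` is how
# far consecutive windows advance; overlap = 0.5 makes windows half-overlap.
chunk_length_s, sampling_rate, overlap = 1.0, 24_000, 0.5

chunk_length = int(chunk_length_s * sampling_rate)  # 24000 samples per window
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 12000 samples
print(chunk_length, chunk_stride)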
8
1
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
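# --- usage sketch (added; assumes the library context where the class above
# is importable as ``transformers.XLMRobertaXLConfig``) ---
config = XLMRobertaXLConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
print(config.model_type, config.hidden_size)  # xlm-roberta-xl 128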
8
'''simple docstring'''
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
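# --- usage sketch (added) ---
# ``attribute_map`` aliases attribute names: PretrainedConfig routes reads
# and writes of ``dropout`` to ``classifier_dropout`` and of ``num_classes``
# to ``num_labels``.
config = ErnieMConfig(classifier_dropout=0.1)
assert config.dropout == 0.1
config.num_classes = 5
assert config.num_labels == 5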
8
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A : List[str] = logging.get_logger(__name__) __A : Optional[Any] = {'vocab_file': 'sentencepiece.bpe.model'} __A : Tuple = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, } __A : Any = { 'moussaKam/mbarthez': 1_024, 'moussaKam/barthez': 1_024, 'moussaKam/barthez-orangesum-title': 1_024, } __A : Optional[int] = '▁' class __UpperCamelCase ( lowercase__ ): lowercase : Dict = VOCAB_FILES_NAMES lowercase : str = PRETRAINED_VOCAB_FILES_MAP lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = ['input_ids', 'attention_mask'] def __init__( self :Dict ,_UpperCamelCase :Dict ,_UpperCamelCase :str="<s>" ,_UpperCamelCase :List[str]="</s>" ,_UpperCamelCase :str="</s>" ,_UpperCamelCase :str="<s>" ,_UpperCamelCase :Tuple="<unk>" ,_UpperCamelCase :str="<pad>" ,_UpperCamelCase :Union[str, Any]="<mask>" ,_UpperCamelCase :Optional[Dict[str, Any]] = None ,**_UpperCamelCase :Optional[Any] ,): # Mask token behave like a normal word, i.e. include the space before it snake_case_ : Tuple = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token snake_case_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_UpperCamelCase ,) snake_case_ : Optional[Any] = vocab_file snake_case_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCamelCase ) ) snake_case_ : Tuple = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} snake_case_ : Optional[Any] = len(self.sp_model ) - 1 snake_case_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def a__ ( self :Optional[Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ : str = [self.cls_token_id] snake_case_ : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a__ ( self :Any ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ,_UpperCamelCase :bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase ,token_ids_a=_UpperCamelCase ,already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1] def a__ ( self :Optional[int] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Union[str, Any] = [self.sep_token_id] snake_case_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + 
token_ids_a + sep ) * [0] @property def a__ ( self :Dict ): return len(self.sp_model ) def a__ ( self :str ): snake_case_ : List[Any] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a__ ( self :int ,_UpperCamelCase :str ): return self.sp_model.encode(_UpperCamelCase ,out_type=_UpperCamelCase ) def a__ ( self :List[Any] ,_UpperCamelCase :Tuple ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case_ : Union[str, Any] = self.sp_model.PieceToId(_UpperCamelCase ) return spm_id if spm_id else self.unk_token_id def a__ ( self :Tuple ,_UpperCamelCase :List[str] ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(_UpperCamelCase ) def a__ ( self :List[Any] ,_UpperCamelCase :List[str] ): snake_case_ : Optional[Any] = [] snake_case_ : List[str] = """""" snake_case_ : Optional[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ : int = True snake_case_ : List[Any] = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ : Optional[int] = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def __getstate__( self :Dict ): snake_case_ : Tuple = self.__dict__.copy() snake_case_ : Optional[Any] = None return state def __setstate__( self :Union[str, Any] ,_UpperCamelCase :Optional[Any] ): snake_case_ : str = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): snake_case_ : List[str] = {} snake_case_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a__ ( self :Optional[Any] ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ): if not os.path.isdir(_UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : str = os.path.join( _UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase ,"""wb""" ) as fi: snake_case_ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
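# --- illustration (standalone sketch of the sequence layouts built above) ---
# BARThez follows the RoBERTa scheme: a single sequence becomes
# ``<s> A </s>`` and a pair becomes ``<s> A </s></s> B </s>``.
cls_id, sep_id = 0, 2  # fairseq-style <s> and </s> ids used above
token_ids_a, token_ids_b = [10, 11], [20]  # dummy token ids

single = [cls_id] + token_ids_a + [sep_id]
pair = [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]
print(single)  # [0, 10, 11, 2]
print(pair)  # [0, 10, 11, 2, 2, 20, 2]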
8
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __UpperCamelCase ( nn.Module ): def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Any = only_cross_attention snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase ) else: snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. snake_case_ : str = ( AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,) # is self-attn if encoder_hidden_states is none else: snake_case_ : Any = None snake_case_ : Optional[Any] = None # 3. 
Feed-forward snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase ) # let chunk size default to None snake_case_ : Optional[int] = None snake_case_ : Dict = 0 def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ): # Sets chunk feed-forward snake_case_ : Optional[Any] = chunk_size snake_case_ : Optional[Any] = dim def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,): # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype ) else: snake_case_ : Optional[int] = self.norma(_UpperCamelCase ) snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} snake_case_ : Union[str, Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output snake_case_ : Union[str, Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: snake_case_ : Any = ( self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase ) ) snake_case_ : List[Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Tuple = attn_output + hidden_states # 3. Feed-forward snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size snake_case_ : int = torch.cat( [self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,) else: snake_case_ : List[str] = self.ff(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output snake_case_ : Any = ff_output + hidden_states return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Tuple = int(dim * mult ) snake_case_ : Optional[int] = dim_out if dim_out is not None else dim if activation_fn == "gelu": snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase ) if activation_fn == "gelu-approximate": snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" ) elif activation_fn == "geglu": snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase ) elif activation_fn == "geglu-approximate": snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Dict = nn.ModuleList([] ) # project in self.net.append(_UpperCamelCase ) # project dropout self.net.append(nn.Dropout(_UpperCamelCase ) ) # project out self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(_UpperCamelCase ) ) def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ): for module in self.net: snake_case_ : Tuple = module(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ): super().__init__() snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Optional[Any] = approximate def a__ ( self :str ,_UpperCamelCase :int ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ,approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ): snake_case_ : Optional[Any] = self.proj(_UpperCamelCase ) snake_case_ : int = self.gelu(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 ) def a__ ( self :Dict ,_UpperCamelCase :List[str] ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ , snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 ) return hidden_states * self.gelu(_UpperCamelCase ) class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ): snake_case_ : 
int = self.proj(_UpperCamelCase ) return x * torch.sigmoid(1.7_02 * x ) class __UpperCamelCase ( nn.Module ): def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ): super().__init__() snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Union[str, Any] = nn.SiLU() snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 ) snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) ) snake_case_ , snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 ) snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift return x class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : int = nn.SiLU() snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase ) snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 ) def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = emb.chunk(6 ,dim=1 ) snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ): super().__init__() snake_case_ : Optional[int] = num_groups snake_case_ : List[Any] = eps if act_fn is None: snake_case_ : int = None else: snake_case_ : Dict = get_activation(_UpperCamelCase ) snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 ) def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ): if self.act: snake_case_ : Any = self.act(_UpperCamelCase ) snake_case_ : Optional[int] = self.linear(_UpperCamelCase ) snake_case_ : Dict = emb[:, :, None, None] snake_case_ , snake_case_ : str = emb.chunk(2 ,dim=1 ) snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps ) snake_case_ : List[str] = x * (1 + scale) + shift return x
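The feed-forward block above gates a GEGLU projection and can optionally chunk the MLP along one dimension to cap peak activation memory. A minimal self-contained sketch of both ideas (PyTorch; the class name and tensor shapes here are illustrative, not the library's own):

import torch
import torch.nn as nn
import torch.nn.functional as F

class MiniGEGLU(nn.Module):
    """Project to 2x width, then gate one half with GELU of the other."""
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, gate = self.proj(x).chunk(2, dim=-1)
        return hidden * F.gelu(gate)

ff = MiniGEGLU(32, 128)
x = torch.randn(2, 16, 32)  # (batch, seq, dim)
# Chunked feed-forward: apply ff to sequence slices and re-concatenate.
# Same output as the unchunked call, with lower peak memory.
out = torch.cat([ff(part) for part in x.chunk(4, dim=1)], dim=1)
assert torch.allclose(out, ff(x), atol=1e-5)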
8
1
'''simple docstring'''

speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.60_93_44,
    "knot": 1.8_52,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.2_77_77_77_78,
    "mph": 0.6_21_37_11_92,
    "knot": 0.5_39_95_68_03,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    '''simple docstring'''
    if unit_from not in speed_chart or unit_to not in speed_chart_inverse:
        msg = (
            f"Incorrect 'unit_from' or 'unit_to' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart)}"
        )
        raise ValueError(msg)
    # normalise through km/h: source unit -> km/h -> target unit
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
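A quick usage check for the converter above (the names `convert_speed`, `speed_chart` are reconstructions from the module's own references, not attested identifiers in the obfuscated source):

print(convert_speed(100, "km/h", "m/s"))     # 27.778
print(convert_speed(27.778, "m/s", "km/h"))  # 100.001 (each hop rounds to 3 d.p.)
print(convert_speed(1, "knot", "km/h"))      # 1.852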
8
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ): '''simple docstring''' snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} snake_case_ : Union[str, Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ): '''simple docstring''' snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,): super().__init__() snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" ) snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" ) snake_case_ : Optional[int] = self.get_char_lens(self.src_file ) snake_case_ : List[str] = max_source_length snake_case_ : str = max_target_length assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}''' snake_case_ : str = tokenizer snake_case_ : str = prefix if n_obs is not None: snake_case_ : int = self.src_lens[:n_obs] snake_case_ : Tuple = src_lang snake_case_ : str = tgt_lang def __len__( self :Any ): return len(self.src_lens ) def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ): snake_case_ : Optional[int] = index + 1 # linecache starts at 1 snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" ) snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" ) assert source_line, F'''empty source line for index {index}''' assert tgt_line, F'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right snake_case_ : int = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer ) snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" ) snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" ) snake_case_ : int = 
source_inputs["""input_ids"""].squeeze() snake_case_ : str = target_inputs["""input_ids"""].squeeze() snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( _UpperCamelCase :str ): return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()] def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ): snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] ) snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] ) snake_case_ : Optional[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase ) snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase ) snake_case_ : Optional[int] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __A : List[Any] = getLogger(__name__) def UpperCAmelCase ( lowerCamelCase_ :List[List] ): '''simple docstring''' return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : int = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): '''simple docstring''' with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) snake_case_ : List[str] = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ): '''simple docstring''' return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Dict ): '''simple docstring''' def remove_articles(lowerCamelCase_ :str ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ :Optional[Any] ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ :Tuple ): snake_case_ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ :Optional[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' 
snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) snake_case_ : Optional[Any] = sum(common.values() ) if num_same == 0: return 0 snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ): '''simple docstring''' assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) snake_case_ : Optional[int] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead snake_case_ : Optional[int] = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue snake_case_ : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
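The scoring helpers above compute token-level F1 between a prediction and a reference. A standalone sketch of the same metric, minus the answer-normalisation step:

from collections import Counter

def token_f1(pred: str, gold: str) -> float:
    pred_toks, gold_toks = pred.lower().split(), gold.lower().split()
    common = Counter(pred_toks) & Counter(gold_toks)  # multiset intersection
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

print(token_f1("the cat sat", "a cat sat down"))  # 2 shared tokens -> ~0.571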
8
1
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Any = tmp_path / """file.csv""" snake_case_ : Any = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[int] = tmp_path / """malformed_file.csv""" snake_case_ : int = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : str = tmp_path / """csv_with_image.csv""" snake_case_ : int = textwrap.dedent( F'''\ image {image_file} ''' ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : int = tmp_path / """csv_with_label.csv""" snake_case_ : Tuple = textwrap.dedent( """\ label good bad good """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv""" snake_case_ : str = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ): '''simple docstring''' snake_case_ : int = Csv() snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(lowerCamelCase_ ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase ( lowerCamelCase_ :Tuple ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : Tuple = f.read().splitlines()[1] snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] ) snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() snake_case_ : List[str] = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : List[Any] = f.read().splitlines()[1:] snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] ) snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() snake_case_ : 
Union[str, Any] = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(label ) for label in labels] def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] ) snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) snake_case_ : Dict = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
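The `converters=` hook exercised by the last test maps a raw CSV cell to a Python value before type inference. The same mechanism through pandas directly (the datasets `Csv` builder appears to forward such keyword arguments to `pandas.read_csv`, which this test relies on):

from io import StringIO
import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n"
df = pd.read_csv(
    StringIO(csv_text),
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
print(df["int_list"].tolist())  # [[1, 2, 3], [4, 5, 6]]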
8
'''simple docstring'''
import functools


def UpperCAmelCase(worda: str, wordb: str) -> int:
    '''simple docstring'''
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all remaining from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all remaining from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        # cost is 1 unless the current letters are identical
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),         # delete from first word
            1 + min_distance(indexa, indexb + 1),         # insert into first word
            diff + min_distance(indexa + 1, indexb + 1),  # substitute / match
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
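For comparison, the same Levenshtein distance computed bottom-up with two rolling rows, which avoids recursion depth limits and unbounded cache growth:

def edit_distance_iterative(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))  # distance from "" to each prefix of b
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(
                prev[j] + 1,               # delete ca
                curr[j - 1] + 1,           # insert cb
                prev[j - 1] + (ca != cb),  # substitute (free if equal)
            ))
        prev = curr
    return prev[-1]

assert edit_distance_iterative("kitten", "sitting") == 3
assert edit_distance_iterative("", "abc") == 3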
8
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : List[Any] = { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = 'roformer' def __init__( self :Dict ,_UpperCamelCase :str=5_0_0_0_0 ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Any=7_6_8 ,_UpperCamelCase :List[Any]=1_2 ,_UpperCamelCase :List[str]=1_2 ,_UpperCamelCase :Dict=3_0_7_2 ,_UpperCamelCase :Dict="gelu" ,_UpperCamelCase :str=0.1 ,_UpperCamelCase :str=0.1 ,_UpperCamelCase :Union[str, Any]=1_5_3_6 ,_UpperCamelCase :Tuple=2 ,_UpperCamelCase :Optional[Any]=0.02 ,_UpperCamelCase :Optional[Any]=1E-1_2 ,_UpperCamelCase :int=0 ,_UpperCamelCase :Tuple=False ,_UpperCamelCase :Tuple=True ,**_UpperCamelCase :Any ,): super().__init__(pad_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Optional[int] = vocab_size snake_case_ : List[str] = hidden_size if embedding_size is None else embedding_size snake_case_ : Optional[int] = hidden_size snake_case_ : Any = num_hidden_layers snake_case_ : str = num_attention_heads snake_case_ : Dict = hidden_act snake_case_ : Union[str, Any] = intermediate_size snake_case_ : int = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Union[str, Any] = max_position_embeddings snake_case_ : Tuple = type_vocab_size snake_case_ : List[Any] = initializer_range snake_case_ : Dict = layer_norm_eps snake_case_ : List[str] = rotary_value snake_case_ : int = use_cache class __UpperCamelCase ( lowercase__ ): @property def a__ ( self :int ): if self.task == "multiple-choice": snake_case_ : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ : Any = {0: """batch""", 1: """sequence"""} snake_case_ : str = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
8
8
1
'''simple docstring'''
from math import factorial


def solution(num: int = 1_00) -> int:
    '''simple docstring'''
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
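A spot check mirroring `solution(10)` above (self-contained; 10! = 3628800, whose digits sum to 27):

from math import factorial

assert sum(int(d) for d in str(factorial(10))) == 27  # 3+6+2+8+8+0+0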
8
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ): '''simple docstring''' # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : Optional[Any] = np.asarray(weights[0] ) snake_case_ : int = np.asarray(weights[1] ) snake_case_ : Any = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : List[Any] = np.asarray(weights[0] ) snake_case_ : Optional[int] = np.asarray(weights[1] ) snake_case_ : Union[str, Any] = np.asarray(weights[2] ) snake_case_ : int = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' # layernorm 1 snake_case_ : str = weights[0][0][0] snake_case_ : int = np.asarray(layer_norm_a[0] ) snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # lsh weights + output snake_case_ : Tuple = weights[0][1] if len(lowerCamelCase_ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) else: set_layer_weights_in_torch_local(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) # intermediate weighs snake_case_ : str = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase_ ) == 4: snake_case_ : List[Any] = intermediate_weights[2] # layernorm 2 snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] ) snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # intermediate dense snake_case_ : Any = 
np.asarray(intermediate_weights[1][0] ) snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) # intermediate out snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] ) snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ): '''simple docstring''' # reformer model snake_case_ : Dict = torch_model.reformer # word embeds snake_case_ : List[Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase_ ) , ) if isinstance(weights[3] , lowerCamelCase_ ): snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) ) snake_case_ : List[Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # output layer norm snake_case_ : Optional[Any] = np.asarray(weights[7][0] ) snake_case_ : List[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # output embeddings snake_case_ : Optional[int] = np.asarray(weights[9][0] ) snake_case_ : Any = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # Initialise PyTorch model snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ ) with open(lowerCamelCase_ , """rb""" ) as f: snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""] set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCamelCase_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) __A : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
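The whole conversion above reduces to one pattern: assert that shapes agree, then wrap the NumPy array as an `nn.Parameter` on the target layer. A minimal sketch (layer sizes are illustrative):

import numpy as np
import torch
from torch import nn

layer = nn.Linear(4, 3)                       # PyTorch stores weight as (out, in)
w = np.random.randn(3, 4).astype(np.float32)
assert layer.weight.shape == w.shape, "layer.weight does not match"
layer.weight = nn.Parameter(torch.tensor(w))  # re-registers the parameter in place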
8
1
'''simple docstring''' import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() __A : Dict = logging.get_logger(__name__) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : Tuple = os.path.abspath(lowerCamelCase_ ) logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' ) # Load weights from TF model snake_case_ : Tuple = tf.train.list_variables(lowerCamelCase_ ) snake_case_ : List[str] = [] snake_case_ : str = [] snake_case_ : List[str] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") snake_case_ : Optional[Any] = full_name.split("""/""" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F'''Skipping non-model layer {full_name}''' ) continue if "optimizer" in full_name: logger.info(F'''Skipping optimization layer {full_name}''' ) continue if name[0] == "model": # ignore initial 'model' snake_case_ : Optional[int] = name[1:] # figure out how many levels deep the name is snake_case_ : Tuple = 0 for _name in name: if _name.startswith("""layer_with_weights""" ): depth += 1 else: break layer_depth.append(lowerCamelCase_ ) # read data snake_case_ : Tuple = tf.train.load_variable(lowerCamelCase_ , lowerCamelCase_ ) names.append("""/""".join(lowerCamelCase_ ) ) arrays.append(lowerCamelCase_ ) logger.info(F'''Read a total of {len(lowerCamelCase_ ):,} layers''' ) # Sanity check if len(set(lowerCamelCase_ ) ) != 1: raise ValueError(F'''Found layer names with different depths (layer depth {list(set(lowerCamelCase_ ) )})''' ) snake_case_ : Optional[int] = list(set(lowerCamelCase_ ) )[0] if layer_depth != 1: raise ValueError( """The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP""" """ heads.""" ) # convert layers logger.info("""Converting weights...""" ) for full_name, array in zip(lowerCamelCase_ , lowerCamelCase_ ): snake_case_ : Optional[Any] = full_name.split("""/""" ) snake_case_ : Any = model snake_case_ : List[str] = [] for i, m_name in enumerate(lowerCamelCase_ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("""layer_with_weights""" ): snake_case_ : int = int(m_name.split("""-""" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["""embeddings""", """LayerNorm"""] ) snake_case_ : Optional[int] = getattr(lowerCamelCase_ , """embeddings""" ) snake_case_ : Union[str, Any] = getattr(lowerCamelCase_ , """LayerNorm""" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] ) snake_case_ : List[str] = getattr(lowerCamelCase_ , """encoder""" ) snake_case_ : List[Any] = getattr(lowerCamelCase_ , """layer""" ) snake_case_ : List[str] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["""pooler""", """dense"""] ) snake_case_ : Optional[int] = getattr(lowerCamelCase_ , """pooler""" ) snake_case_ : Any = getattr(lowerCamelCase_ , """dense""" ) elif m_name == "embeddings": trace.append("""embeddings""" ) snake_case_ : List[Any] = getattr(lowerCamelCase_ , """embeddings""" ) if layer_num == 0: trace.append("""word_embeddings""" ) snake_case_ : Dict = getattr(lowerCamelCase_ , """word_embeddings""" ) elif layer_num == 1: trace.append("""position_embeddings""" ) snake_case_ : int = getattr(lowerCamelCase_ , """position_embeddings""" ) elif layer_num == 2: trace.append("""token_type_embeddings""" ) snake_case_ : str = getattr(lowerCamelCase_ , """token_type_embeddings""" ) else: raise ValueError(F'''Unknown embedding layer with name {full_name}''' ) trace.append("""weight""" ) snake_case_ : Tuple = getattr(lowerCamelCase_ , """weight""" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["""attention""", """self"""] ) snake_case_ : List[Any] = getattr(lowerCamelCase_ , """attention""" ) snake_case_ : Tuple = getattr(lowerCamelCase_ , """self""" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["""attention""", """output""", """LayerNorm"""] ) snake_case_ : int = getattr(lowerCamelCase_ , """attention""" ) snake_case_ : Optional[Any] = getattr(lowerCamelCase_ , """output""" ) snake_case_ : Any = getattr(lowerCamelCase_ , """LayerNorm""" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["""attention""", """output""", """dense"""] ) snake_case_ : Optional[Any] = getattr(lowerCamelCase_ , """attention""" ) snake_case_ : Optional[Any] = getattr(lowerCamelCase_ , """output""" ) snake_case_ : Optional[Any] = getattr(lowerCamelCase_ , """dense""" ) elif m_name == "_output_dense": # output dense trace.extend(["""output""", """dense"""] ) snake_case_ : List[Any] = getattr(lowerCamelCase_ , """output""" ) snake_case_ : List[Any] = getattr(lowerCamelCase_ , """dense""" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["""output""", """LayerNorm"""] ) snake_case_ : str = getattr(lowerCamelCase_ , """output""" ) snake_case_ : Optional[int] = getattr(lowerCamelCase_ , """LayerNorm""" ) 
elif m_name == "_key_dense": # attention key trace.append("""key""" ) snake_case_ : int = getattr(lowerCamelCase_ , """key""" ) elif m_name == "_query_dense": # attention query trace.append("""query""" ) snake_case_ : Union[str, Any] = getattr(lowerCamelCase_ , """query""" ) elif m_name == "_value_dense": # attention value trace.append("""value""" ) snake_case_ : int = getattr(lowerCamelCase_ , """value""" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["""intermediate""", """dense"""] ) snake_case_ : List[str] = getattr(lowerCamelCase_ , """intermediate""" ) snake_case_ : Optional[int] = getattr(lowerCamelCase_ , """dense""" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("""output""" ) snake_case_ : Any = getattr(lowerCamelCase_ , """output""" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("""bias""" ) snake_case_ : Any = getattr(lowerCamelCase_ , """bias""" ) elif m_name in ["kernel", "gamma"]: trace.append("""weight""" ) snake_case_ : List[str] = getattr(lowerCamelCase_ , """weight""" ) else: logger.warning(F'''Ignored {m_name}''' ) # for certain layers reshape is necessary snake_case_ : List[Any] = """.""".join(lowerCamelCase_ ) if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowerCamelCase_ ) or re.match( R"""(\S+)\.attention\.output\.dense\.weight""" , lowerCamelCase_ ): snake_case_ : Optional[Any] = array.reshape(pointer.data.shape ) if "kernel" in full_name: snake_case_ : Dict = array.transpose() if pointer.shape == array.shape: snake_case_ : Optional[Any] = torch.from_numpy(lowerCamelCase_ ) else: raise ValueError( F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:''' F''' {array.shape}''' ) logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' ) return model def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str ): '''simple docstring''' # Instantiate model logger.info(F'''Loading model based on config from {config_path}...''' ) snake_case_ : Any = BertConfig.from_json_file(lowerCamelCase_ ) snake_case_ : List[Any] = BertModel(lowerCamelCase_ ) # Load weights from checkpoint logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' ) load_tfa_weights_in_bert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save pytorch-model logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' ) torch.save(model.state_dict() , lowerCamelCase_ ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) __A : List[str] = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
8
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class __UpperCamelCase ( lowercase__ ): lowercase : List[Any] = 'canine' def __init__( self :Optional[int] ,_UpperCamelCase :Dict=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :int=1_2 ,_UpperCamelCase :int=3_0_7_2 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=1_6_3_8_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :List[str]=0.02 ,_UpperCamelCase :Any=1E-1_2 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :List[str]=0xE_0_0_0 ,_UpperCamelCase :Optional[Any]=0xE_0_0_1 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :str=8 ,_UpperCamelCase :int=1_6_3_8_4 ,_UpperCamelCase :int=1_2_8 ,**_UpperCamelCase :str ,): super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : List[str] = max_position_embeddings snake_case_ : Union[str, Any] = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : Tuple = intermediate_size snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Optional[Any] = initializer_range snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[str] = layer_norm_eps # Character config: snake_case_ : Any = downsampling_rate snake_case_ : List[str] = upsampling_kernel_size snake_case_ : int = num_hash_functions snake_case_ : Tuple = num_hash_buckets snake_case_ : Tuple = local_transformer_stride
8
1
'''simple docstring''' from pathlib import Path import numpy as np from PIL import Image def UpperCAmelCase ( lowerCamelCase_ :np.ndarray ): '''simple docstring''' snake_case_ , snake_case_ , snake_case_ : List[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b def UpperCAmelCase ( lowerCamelCase_ :np.ndarray ): '''simple docstring''' return (gray > 1_27) & (gray <= 2_55) def UpperCAmelCase ( lowerCamelCase_ :np.ndarray , lowerCamelCase_ :np.ndarray ): '''simple docstring''' snake_case_ : Union[str, Any] = np.zeros_like(lowerCamelCase_ ) snake_case_ : Union[str, Any] = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image snake_case_ : List[str] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): snake_case_ : Optional[int] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() snake_case_ : Dict = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : Optional[int] = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg' __A : int = np.array(Image.open(lena_path)) # kernel to be applied __A : Optional[int] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : int = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : int = Image.fromarray(output).convert('RGB') pil_img.save('result_dilation.png')
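A compact, self-contained version of the dilation above, using centre-aligned padding instead of corner-offset bookkeeping: a pixel turns on when any kernel-covered neighbour is on.

import numpy as np

def dilate(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    kh, kw = kernel.shape
    padded = np.zeros((image.shape[0] + kh - 1, image.shape[1] + kw - 1))
    padded[kh // 2 : kh // 2 + image.shape[0], kw // 2 : kw // 2 + image.shape[1]] = image
    out = np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            out[y, x] = int((kernel * padded[y : y + kh, x : x + kw]).sum() > 0)
    return out

img = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilate(img, cross))  # the single centre pixel grows into a plus shape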
8
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Optional[Any] = { 'facebook/blenderbot_small-90M': 512, } class __UpperCamelCase ( lowercase__ ): lowercase : str = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = BlenderbotSmallTokenizer def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,): super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = add_prefix_space def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ): snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : int = [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
8
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: __A : Any = None __A : Optional[Any] = logging.get_logger(__name__) __A : List[str] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} __A : Any = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json', }, } __A : List[Any] = { 'camembert-base': 512, } __A : Union[str, Any] = '▁' class __UpperCamelCase ( lowercase__ ): lowercase : Dict = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] = ['input_ids', 'attention_mask'] lowercase : List[str] = CamembertTokenizer def __init__( self :Optional[Any] ,_UpperCamelCase :Dict=None ,_UpperCamelCase :Any=None ,_UpperCamelCase :List[Any]="<s>" ,_UpperCamelCase :List[str]="</s>" ,_UpperCamelCase :List[str]="</s>" ,_UpperCamelCase :Dict="<s>" ,_UpperCamelCase :str="<unk>" ,_UpperCamelCase :List[str]="<pad>" ,_UpperCamelCase :Union[str, Any]="<mask>" ,_UpperCamelCase :Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] ,**_UpperCamelCase :str ,): # Mask token behave like a normal word, i.e. include the space before it snake_case_ : List[str] = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token super().__init__( _UpperCamelCase ,tokenizer_file=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Dict = vocab_file snake_case_ : Tuple = False if not self.vocab_file else True def a__ ( self :List[Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ : Optional[int] = [self.cls_token_id] snake_case_ : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a__ ( self :Dict ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Optional[Any] = [self.sep_token_id] snake_case_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self :Any ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : Any = os.path.join( _UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ): copyfile(self.vocab_file ,_UpperCamelCase ) return (out_vocab_file,)
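The two helpers at the end implement the RoBERTa-style special-token layout CamemBERT inherits; laid out here with hypothetical token ids:

cls_id, sep_id = 5, 6        # hypothetical <s> and </s> ids
a, b = [10, 11], [20]        # two already-tokenized sequences

single = [cls_id] + a + [sep_id]                       # <s> A </s>
pair = [cls_id] + a + [sep_id, sep_id] + b + [sep_id]  # <s> A </s></s> B </s>

print(single)           # [5, 10, 11, 6]
print(pair)             # [5, 10, 11, 6, 6, 20, 6]
print([0] * len(pair))  # token_type_ids are all zeros, even for pairs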
8
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :list ): '''simple docstring''' if len(lowerCamelCase_ ) <= 1: return lst snake_case_ : Union[str, Any] = 1 while i < len(lowerCamelCase_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case_ , snake_case_ : Union[str, Any] = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case_ : int = 1 return lst if __name__ == "__main__": __A : Optional[int] = input('Enter numbers separated by a comma:\n').strip() __A : int = [int(item) for item in user_input.split(',')] print(gnome_sort(unsorted))
8
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
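The guarded import above is the standard optional-dependency pattern: importing the package never hard-fails, and the real classes are swapped for dummies that raise only on use. In miniature (names here are illustrative stand-ins, not the library's own dummies):

try:
    import torch  # optional heavy backend
    _torch_available = True
except ImportError:
    _torch_available = False

class VersatilePipelineStub:
    """Raises late, at construction time, instead of at import time."""
    def __init__(self):
        if not _torch_available:
            raise ImportError("this pipeline requires `torch`")

print("import succeeded; torch available:", _torch_available)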
8
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape 
,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
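The tester above builds a random attention mask and then forces each row to keep at least an initial span of tokens. An equivalent construction in plain NumPy (one reading of the obfuscated slice assignments, not the verbatim test code):

import numpy as np

rng = np.random.default_rng(0)
batch, seq = 2, 7
start = rng.integers(1, seq - 1, size=batch)  # per-row cut point, at least 1
mask = np.zeros((batch, seq), dtype=np.int64)
for row, cut in enumerate(start):
    mask[row, :cut] = 1                       # tokens before the cut are attended to
print(mask)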
8
1
'''simple docstring'''
def UpperCAmelCase ( lowerCamelCase_ :int = 10_00 ):
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )


if __name__ == "__main__":
    print(solution())
8
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : int = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
1
'''simple docstring'''
from collections import Counter
from timeit import timeit


def UpperCAmelCase ( lowerCamelCase_ :str = "" , ):
    '''simple docstring'''
    return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2


def UpperCAmelCase ( lowerCamelCase_ :str = "" ):
    '''simple docstring'''
    if len(lowerCamelCase_ ) == 0:
        return True
    snake_case_ : List[Any] = input_str.replace(""" """ , """""" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    snake_case_ : dict[str, int] = {}

    for character in lower_case_input_str:
        snake_case_ : Dict = character_freq_dict.get(lowerCamelCase_ , 0 ) + 1

    snake_case_ : List[Any] = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def UpperCAmelCase ( lowerCamelCase_ :str = "" ):
    '''simple docstring'''
    print("""\nFor string = """ , lowerCamelCase_ , """:""" )
    print(
        """> can_string_be_rearranged_as_palindrome_counter()""" ,
        """\tans =""" ,
        can_string_be_rearranged_as_palindrome_counter(lowerCamelCase_ ) ,
        """\ttime =""" ,
        timeit(
            """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" ,
            setup="""import __main__ as z""" ,
        ) ,
        """seconds""" ,
    )
    print(
        """> can_string_be_rearranged_as_palindrome()""" ,
        """\tans =""" ,
        can_string_be_rearranged_as_palindrome(lowerCamelCase_ ) ,
        """\ttime =""" ,
        timeit(
            """z.can_string_be_rearranged_as_palindrome(z.check_str)""" ,
            setup="""import __main__ as z""" ,
        ) ,
        """seconds""" ,
    )


if __name__ == "__main__":
    __A : str = input(
        'Enter string to determine if it can be rearranged as a palindrome or not: '
    ).strip()
    benchmark(check_str)
    __A : int = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
8
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


__A : Optional[int] = logging.get_logger(__name__)


class __UpperCamelCase ( lowercase__ ):
    def __init__( self :List[str] ,*_UpperCamelCase :str ,**_UpperCamelCase :Optional[int] ):
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" ,_UpperCamelCase ,)
        super().__init__(*_UpperCamelCase ,**_UpperCamelCase )
8
1
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


__A : Dict = logging.get_logger(__name__)

__A : Dict = {
    'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class __UpperCamelCase ( lowercase__ ):
    lowercase : Dict = 'gpt_neo'
    lowercase : Optional[Any] = ['past_key_values']
    lowercase : List[Any] = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__( self :List[str] ,_UpperCamelCase :List[Any]=5_0_2_5_7 ,_UpperCamelCase :Union[str, Any]=2_0_4_8 ,_UpperCamelCase :List[str]=2_0_4_8 ,_UpperCamelCase :str=2_4 ,_UpperCamelCase :Dict=[[["global", "local"], 1_2]] ,_UpperCamelCase :Union[str, Any]=1_6 ,_UpperCamelCase :Optional[Any]=None ,_UpperCamelCase :Dict=2_5_6 ,_UpperCamelCase :Any="gelu_new" ,_UpperCamelCase :List[str]=0.0 ,_UpperCamelCase :Dict=0.0 ,_UpperCamelCase :Any=0.0 ,_UpperCamelCase :str=0.1 ,_UpperCamelCase :str=1E-5 ,_UpperCamelCase :str=0.02 ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :Any=5_0_2_5_6 ,_UpperCamelCase :Any=5_0_2_5_6 ,**_UpperCamelCase :int ,):
        snake_case_ : str = vocab_size
        snake_case_ : Union[str, Any] = max_position_embeddings
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : List[Any] = num_layers
        snake_case_ : Optional[int] = num_heads
        snake_case_ : Any = intermediate_size
        snake_case_ : Optional[int] = window_size
        snake_case_ : Tuple = activation_function
        snake_case_ : Union[str, Any] = resid_dropout
        snake_case_ : Optional[Any] = embed_dropout
        snake_case_ : List[str] = attention_dropout
        snake_case_ : Optional[int] = classifier_dropout
        snake_case_ : Optional[Any] = layer_norm_epsilon
        snake_case_ : Optional[int] = initializer_range
        snake_case_ : List[str] = use_cache
        snake_case_ : int = bos_token_id
        snake_case_ : Dict = eos_token_id

        snake_case_ : List[str] = attention_types
        snake_case_ : Any = self.expand_attention_types_params(_UpperCamelCase )

        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""" )

        super().__init__(bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase )

    @staticmethod
    def a__ ( _UpperCamelCase :List[Any] ):
        snake_case_ : Union[str, Any] = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions


def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] ):
    '''simple docstring'''
    import torch

    snake_case_ : str = input.size()
    snake_case_ : int = len(lowerCamelCase_ )
    snake_case_ : int = shape[dimension]

    snake_case_ : Dict = torch.arange(0 , lowerCamelCase_ , lowerCamelCase_ )
    snake_case_ : List[str] = torch.div(sizedim - size , lowerCamelCase_ , rounding_mode="""floor""" ) + 1
    snake_case_ : str = torch.arange(lowerCamelCase_ ) + low_indices[:min_length][:, None]

    snake_case_ : List[Any] = [slice(lowerCamelCase_ )] * rank
    snake_case_ : List[Any] = indices
    snake_case_ : str = input[s]

    snake_case_ : Optional[int] = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )

    return sliced.permute(lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] ):
    '''simple docstring'''
    import torch

    snake_case_ : Tuple = torch.arange(1 , lowerCamelCase_ )
    snake_case_ : List[Any] = torch.remainder(lowerCamelCase_ , lowerCamelCase_ )
    snake_case_ : Union[str, Any] = remainders == 0
    snake_case_ : Union[str, Any] = candidates[divisor_indices]

    snake_case_ : Optional[Any] = torch.max(lowerCamelCase_ )
    return largest_divisor, torch.div(lowerCamelCase_ , lowerCamelCase_ , rounding_mode="""floor""" )


class __UpperCamelCase ( lowercase__ ):
    @property
    def a__ ( self :Tuple ):
        snake_case_ : Optional[int] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(_UpperCamelCase ,direction="""inputs""" )
            snake_case_ : int = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            snake_case_ : Any = {0: """batch""", 1: """sequence"""}

        return common_inputs

    @property
    def a__ ( self :Optional[Any] ):
        return self._config.num_heads

    def a__ ( self :Optional[Any] ,_UpperCamelCase :PreTrainedTokenizer ,_UpperCamelCase :int = -1 ,_UpperCamelCase :int = -1 ,_UpperCamelCase :bool = False ,_UpperCamelCase :Optional[TensorType] = None ,):
        snake_case_ : List[str] = super(_UpperCamelCase ,self ).generate_dummy_inputs(
            _UpperCamelCase ,batch_size=_UpperCamelCase ,seq_length=_UpperCamelCase ,is_pair=_UpperCamelCase ,framework=_UpperCamelCase )

        # We need to order the input in the way they appears in the forward()
        snake_case_ : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                snake_case_ , snake_case_ : Any = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                snake_case_ : Any = seqlen + 2
                snake_case_ : Optional[Any] = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                snake_case_ : Optional[Any] = [
                    (torch.zeros(_UpperCamelCase ), torch.zeros(_UpperCamelCase )) for _ in range(self.num_layers )
                ]

        snake_case_ : List[str] = common_inputs["""attention_mask"""]
        if self.use_past:
            snake_case_ : List[Any] = ordered_inputs["""attention_mask"""].dtype
            snake_case_ : Tuple = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(_UpperCamelCase ,_UpperCamelCase ,dtype=_UpperCamelCase )] ,dim=1 )

        return ordered_inputs

    @property
    def a__ ( self :List[str] ):
        return 1_3
8
'''simple docstring'''
import re


def UpperCAmelCase ( lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : List[Any] = re.compile(
        R"""^(?:0|94|\+94|0{2}94)"""
        R"""7(0|1|2|4|5|6|7|8)"""
        R"""(-| |)"""
        R"""\d{7}$""" )
    return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) )


if __name__ == "__main__":
    __A : int = '0094702343221'
    print(is_sri_lankan_phone_number(phone))
8
1
'''simple docstring'''
from __future__ import annotations


__A : Tuple = list[list[int]]

# assigning initial values to the grid
__A : Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
__A : Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def UpperCAmelCase ( lowerCamelCase_ :Matrix , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int ):
    '''simple docstring'''
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def UpperCAmelCase ( lowerCamelCase_ :Matrix ):
    '''simple docstring'''
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None


def UpperCAmelCase ( lowerCamelCase_ :Matrix ):
    '''simple docstring'''
    if location := find_empty_location(lowerCamelCase_ ):
        snake_case_ , snake_case_ : str = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1 , 10 ):
        if is_safe(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
            snake_case_ : str = digit
            if sudoku(lowerCamelCase_ ) is not None:
                return grid
            snake_case_ : Any = 0

    return None


def UpperCAmelCase ( lowerCamelCase_ :Matrix ):
    '''simple docstring'''
    for row in grid:
        for cell in row:
            print(lowerCamelCase_ , end=""" """ )
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print('\nExample grid:\n' + '=' * 20)
        print_solution(example_grid)
        print('\nExample grid solution:')
        __A : Tuple = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print('Cannot find a solution.')
8
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class __UpperCamelCase ( lowercase__ ):
    lowercase : Union[List[PIL.Image.Image], np.ndarray]
    lowercase : Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
8
1
'''simple docstring'''
def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    # Round the real cube root before cubing; comparing the raw floating-point
    # value of n ** (1 / 3) misclassifies perfect cubes such as 27.
    snake_case_ : str = round(n ** (1 / 3) )
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
8
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
    lowercase : Dict = StableDiffusionInpaintPipeline
    lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowercase : Dict = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowercase : Optional[int] = frozenset([] )

    def a__ ( self :Any ):
        torch.manual_seed(0 )
        snake_case_ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,)
        snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
        torch.manual_seed(0 )
        snake_case_ : List[str] = AutoencoderKL(
            block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,)
        torch.manual_seed(0 )
        snake_case_ : Optional[int] = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,)
        snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase )
        snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        snake_case_ : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
        snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
        if str(_UpperCamelCase ).startswith("""mps""" ):
            snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase )
        else:
            snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
        snake_case_ : int = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": init_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def a__ ( self :Any ):
        snake_case_ : Union[str, Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : Optional[Any] = self.get_dummy_components()
        snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase )
        snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase )
        sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )

        snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase )
        snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images
        snake_case_ : List[Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def a__ ( self :Any ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def a__ ( self :List[Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self :Tuple ):
        snake_case_ : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        snake_case_ : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""" )

        snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase )
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing()

        snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""

        snake_case_ : List[str] = torch.manual_seed(0 )
        snake_case_ : Dict = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
        snake_case_ : Union[str, Any] = output.images[0]

        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 9E-3

    def a__ ( self :Tuple ):
        snake_case_ : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        snake_case_ : List[str] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )

        snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,)
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing()

        snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""

        snake_case_ : List[Any] = torch.manual_seed(0 )
        snake_case_ : Any = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
        snake_case_ : List[str] = output.images[0]

        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def a__ ( self :Union[str, Any] ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        snake_case_ : Optional[int] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )

        snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" )
        snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,)
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""

        snake_case_ : Optional[int] = torch.manual_seed(0 )
        snake_case_ : Tuple = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,)

        snake_case_ : Any = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
8
1
'''simple docstring'''
import random


class __UpperCamelCase :
    @staticmethod
    def a__ ( _UpperCamelCase :str ):
        snake_case_ : Any = [ord(_UpperCamelCase ) for i in text]
        snake_case_ : Dict = []
        snake_case_ : Union[str, Any] = []
        for i in plain:
            snake_case_ : Dict = random.randint(1 ,3_0_0 )
            snake_case_ : List[Any] = (i + k) * k
            cipher.append(_UpperCamelCase )
            key.append(_UpperCamelCase )
        return cipher, key

    @staticmethod
    def a__ ( _UpperCamelCase :list[int] ,_UpperCamelCase :list[int] ):
        snake_case_ : str = []
        for i in range(len(_UpperCamelCase ) ):
            snake_case_ : Tuple = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(_UpperCamelCase ) )
        return "".join(_UpperCamelCase )


if __name__ == "__main__":
    __A, __A : Dict = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
8
'''simple docstring'''
import collections
import os
import re
from pathlib import Path


__A : Dict = 'src/transformers'


# Matches is_xxx_available()
__A : Dict = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__A : int = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__A : List[Any] = re.compile(r'^\s*try:')
# Catches a line with else:
__A : Any = re.compile(r'^\s*else:')


def UpperCAmelCase ( lowerCamelCase_ :str ):
    '''simple docstring'''
    if _re_test_backend.search(lowerCamelCase_ ) is None:
        return None
    snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )]
    backends.sort()
    return "_and_".join(lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ):
    '''simple docstring'''
    with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        snake_case_ : str = f.readlines()

    snake_case_ : List[Any] = 0
    while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lowerCamelCase_ ):
        return None

    # First grab the objects without a specific backend in _import_structure
    snake_case_ : Union[str, Any] = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
        snake_case_ : str = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(lowerCamelCase_ ):
            snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0]
            snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
            line_index += 1
            continue
        snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ )
        if single_line_import_search is not None:
            snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0]
            objects.extend(lowerCamelCase_ )
        elif line.startswith(""" """ * 8 + """\"""" ):
            objects.append(line[9:-3] )
        line_index += 1

    snake_case_ : Union[str, Any] = {"""none""": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        snake_case_ : List[str] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            snake_case_ : Tuple = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            snake_case_ : Dict = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
                snake_case_ : List[Any] = lines[line_index]
                if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None:
                    objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] )
                elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None:
                    snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ )
                    snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
                    objects.extend(lowerCamelCase_ )
                elif _re_between_brackets.search(lowerCamelCase_ ) is not None:
                    snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ )
                    snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
                    objects.extend(lowerCamelCase_ )
                elif _re_quote_object.search(lowerCamelCase_ ) is not None:
                    objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] )
                elif line.startswith(""" """ * 8 + """\"""" ):
                    objects.append(line[9:-3] )
                elif line.startswith(""" """ * 12 + """\"""" ):
                    objects.append(line[13:-3] )
                line_index += 1

            snake_case_ : int = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    snake_case_ : List[Any] = []
    while (
        line_index < len(lowerCamelCase_ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("""else""" )
    ):
        snake_case_ : Union[str, Any] = lines[line_index]
        snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
        elif line.startswith(""" """ * 8 ):
            objects.append(line[8:-2] )
        line_index += 1

    snake_case_ : Dict = {"""none""": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lowerCamelCase_ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        snake_case_ : Optional[Any] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            snake_case_ : str = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            snake_case_ : Any = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
                snake_case_ : Dict = lines[line_index]
                snake_case_ : Any = _re_import.search(lowerCamelCase_ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1

            snake_case_ : int = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ):
    '''simple docstring'''
    def find_duplicates(lowerCamelCase_ :Union[str, Any] ):
        return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]

    snake_case_ : Optional[int] = []
    for key in import_dict_objects.keys():
        snake_case_ : int = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        snake_case_ : List[str] = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )

        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
    return errors


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : Tuple = []
    for root, _, files in os.walk(lowerCamelCase_ ):
        if "__init__.py" in files:
            snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" )
            snake_case_ : Dict = parse_init(lowerCamelCase_ )
            if objects is not None:
                snake_case_ : Any = analyze_results(*lowerCamelCase_ )
                if len(lowerCamelCase_ ) > 0:
                    snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("""\n""".join(lowerCamelCase_ ) )
    if len(lowerCamelCase_ ) > 0:
        raise ValueError("""\n\n""".join(lowerCamelCase_ ) )


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : Union[str, Any] = []
    for path, directories, files in os.walk(lowerCamelCase_ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(lowerCamelCase_ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) )
            snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" )
            submodules.append(lowerCamelCase_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            snake_case_ : Dict = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) )
            snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(lowerCamelCase_ )
    return submodules


__A : List[Any] = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
    'models.esm.openfold_utils',
]


def UpperCAmelCase ( ):
    '''simple docstring'''
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ )

    snake_case_ : List[str] = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f:
        snake_case_ : str = f.read()
        import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) )

    snake_case_ : Dict = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(lowerCamelCase_ ) > 0:
        snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registed in the main init of Transformers:\n"""
            F'''{list_of_modules}\n'''
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
8
1
'''simple docstring'''
from __future__ import annotations

from collections.abc import Generator


def UpperCAmelCase ( ):
    '''simple docstring'''
    snake_case_ : dict[int, int] = {}
    snake_case_ : List[str] = 2
    while True:
        snake_case_ : Union[str, Any] = factor_map.pop(lowerCamelCase_ , lowerCamelCase_ )
        if factor:
            snake_case_ : Any = factor + prime
            while x in factor_map:
                x += factor
            snake_case_ : List[Any] = factor
        else:
            snake_case_ : int = prime
            yield prime
        prime += 1


def UpperCAmelCase ( lowerCamelCase_ :float = 1E10 ):
    '''simple docstring'''
    snake_case_ : Optional[Any] = sieve()
    snake_case_ : Tuple = 1
    while True:
        snake_case_ : Optional[Any] = next(lowerCamelCase_ )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(lowerCamelCase_ )
        n += 2


if __name__ == "__main__":
    print(solution())
8
'''simple docstring'''
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class __UpperCamelCase ( unittest.TestCase ):
    def __init__( self :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Any=1_8 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :List[str]=4_0_0 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=True ,):
        snake_case_ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        snake_case_ : Union[str, Any] = parent
        snake_case_ : str = batch_size
        snake_case_ : List[Any] = num_channels
        snake_case_ : Tuple = image_size
        snake_case_ : int = min_resolution
        snake_case_ : int = max_resolution
        snake_case_ : Union[str, Any] = do_resize
        snake_case_ : Optional[Any] = size
        snake_case_ : Any = apply_ocr

    def a__ ( self :Union[str, Any] ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
    lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def a__ ( self :List[Any] ):
        snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self )

    @property
    def a__ ( self :int ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def a__ ( self :Any ):
        snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) )
        self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) )

    def a__ ( self :int ):
        snake_case_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} )

        snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 )
        self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} )

    def a__ ( self :Optional[Any] ):
        pass

    def a__ ( self :Union[str, Any] ):
        # Initialize image_processing
        snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase ,Image.Image )

        # Test not batched input
        snake_case_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" )
        self.assertEqual(
            encoding.pixel_values.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)
        self.assertIsInstance(encoding.words ,_UpperCamelCase )
        self.assertIsInstance(encoding.boxes ,_UpperCamelCase )

        # Test batched
        snake_case_ : List[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

    def a__ ( self :Tuple ):
        # Initialize image_processing
        snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase ,np.ndarray )

        # Test not batched input
        snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

        # Test batched
        snake_case_ : Any = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

    def a__ ( self :Optional[Any] ):
        # Initialize image_processing
        snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase ,torch.Tensor )

        # Test not batched input
        snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

        # Test batched
        snake_case_ : Union[str, Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) ,)

    def a__ ( self :List[Any] ):
        # with apply_OCR = True
        snake_case_ : Any = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        snake_case_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" )

        snake_case_ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" )

        snake_case_ : Dict = image_processing(_UpperCamelCase ,return_tensors="""pt""" )

        self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
        self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]]  # noqa: E231
        snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words ,_UpperCamelCase )
        self.assertListEqual(encoding.boxes ,_UpperCamelCase )

        # with apply_OCR = False
        snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase )

        snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" )

        self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
8
1
'''simple docstring'''
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


__A : List[Any] = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
__A : Dict = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
    raise Exception('requires fairseq >= 0.9.0')


logging.set_verbosity_info()
__A : List[str] = logging.get_logger(__name__)

__A : Union[str, Any] = ' Hello world! cécé herlolip'

__A : Union[str, Any] = [
    ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
    ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
    ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
    ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]


def UpperCAmelCase ( lowerCamelCase_ :List[str] ):
    '''simple docstring'''
    snake_case_ : Union[str, Any] = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )


def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple ):
    '''simple docstring'''
    snake_case_ : Tuple = dct.pop(lowerCamelCase_ )
    snake_case_ : Union[str, Any] = val


def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    snake_case_ : List[str] = torch.load(lowerCamelCase_ , map_location="""cpu""" )
    snake_case_ : List[str] = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
    hub_interface.model.load_state_dict(sd["""model"""] )
    return hub_interface


def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
    '''simple docstring'''
    snake_case_ , snake_case_ : Optional[int] = emb.weight.shape
    snake_case_ : List[str] = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
    snake_case_ : Tuple = emb.weight.data
    return lin_layer


@torch.no_grad()
def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any]=None ):
    '''simple docstring'''
    if not os.path.exists(lowerCamelCase_ ):
        snake_case_ : Tuple = torch.hub.load("""pytorch/fairseq""" , lowerCamelCase_ ).eval()
    else:
        snake_case_ : Tuple = load_xsum_checkpoint(lowerCamelCase_ )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        snake_case_ : List[str] = checkpoint_path.replace(""".""" , """-""" )
    snake_case_ : str = BartConfig.from_pretrained(lowerCamelCase_ )
    snake_case_ : str = bart.encode(lowerCamelCase_ ).unsqueeze(0 )
    snake_case_ : Union[str, Any] = BartTokenizer.from_pretrained(lowerCamelCase_ ).encode(lowerCamelCase_ , return_tensors="""pt""" ).unsqueeze(0 )
    if not torch.eq(lowerCamelCase_ , lowerCamelCase_ ).all():
        raise ValueError(
            F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )

    if checkpoint_path == "bart.large.mnli":
        snake_case_ : Tuple = bart.state_dict()
        remove_ignore_keys_(lowerCamelCase_ )
        snake_case_ : Optional[Any] = state_dict["""model.decoder.embed_tokens.weight"""]
        for src, dest in mnli_rename_keys:
            rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        snake_case_ : int = BartForSequenceClassification(lowerCamelCase_ ).eval()
        model.load_state_dict(lowerCamelCase_ )
        snake_case_ : Tuple = bart.predict("""mnli""" , lowerCamelCase_ , return_logits=lowerCamelCase_ )
        snake_case_ : Union[str, Any] = model(lowerCamelCase_ )[0]  # logits
    else:  # no classification heads to worry about
        snake_case_ : int = bart.model.state_dict()
        remove_ignore_keys_(lowerCamelCase_ )
        snake_case_ : Tuple = state_dict["""decoder.embed_tokens.weight"""]
        snake_case_ : Dict = bart.extract_features(lowerCamelCase_ )
        if hf_checkpoint_name == "facebook/bart-large":
            snake_case_ : Union[str, Any] = BartModel(lowerCamelCase_ ).eval()
            model.load_state_dict(lowerCamelCase_ )
            snake_case_ : List[str] = model(lowerCamelCase_ ).model[0]
        else:
            snake_case_ : Union[str, Any] = BartForConditionalGeneration(lowerCamelCase_ ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(lowerCamelCase_ )
            if hasattr(lowerCamelCase_ , """lm_head""" ):
                snake_case_ : Tuple = make_linear_from_emb(model.model.shared )
            snake_case_ : Union[str, Any] = model.model(lowerCamelCase_ )[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
    Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
    model.save_pretrained(lowerCamelCase_ )


if __name__ == "__main__":
    __A : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
    )
    __A : Optional[Any] = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
8
'''simple docstring'''
def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    snake_case_ : List[Any] = generate_pascal_triangle(lowerCamelCase_ )
    for row_idx in range(lowerCamelCase_ ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=""" """ )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=""" """ )
            else:
                print(triangle[row_idx][col_idx] , end="""""" )
        print()


def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )

    snake_case_ : list[list[int]] = []
    for current_row_idx in range(lowerCamelCase_ ):
        snake_case_ : List[str] = populate_current_row(lowerCamelCase_ , lowerCamelCase_ )
        triangle.append(lowerCamelCase_ )
    return triangle


def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int ):
    '''simple docstring'''
    snake_case_ : Union[str, Any] = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    snake_case_ , snake_case_ : Optional[Any] = 1, 1
    for current_col_idx in range(1 , lowerCamelCase_ ):
        calculate_current_element(
            lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    return current_row


def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :list[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ):
    '''simple docstring'''
    snake_case_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1]
    snake_case_ : List[Any] = triangle[current_row_idx - 1][current_col_idx]
    snake_case_ : Optional[int] = above_to_left_elt + above_to_right_elt


def UpperCAmelCase ( lowerCamelCase_ :int ):
    '''simple docstring'''
    if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )

    snake_case_ : list[list[int]] = [[1]]

    for row_index in range(1 , lowerCamelCase_ ):
        snake_case_ : Optional[Any] = [0] + result[-1] + [0]
        snake_case_ : Dict = row_index + 1
        # Calculate the number of distinct elements in a row
        snake_case_ : Any = sum(divmod(lowerCamelCase_ , 2 ) )
        snake_case_ : Tuple = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        snake_case_ : Optional[int] = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        snake_case_ : str = row_first_half + row_second_half
        result.append(lowerCamelCase_ )

    return result


def UpperCAmelCase ( ):
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(lowerCamelCase_ :Callable , lowerCamelCase_ :int ) -> None:
        snake_case_ : Dict = F'''{func.__name__}({value})'''
        snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="""import __main__""" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'''{call:38} -- {timing:.4f} seconds''' )

    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(lowerCamelCase_ , lowerCamelCase_ )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
8
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ..utils import _LazyModule


__A : Optional[Any] = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
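The `_LazyModule` registration above defers the heavy submodule imports until an attribute is first accessed. A simplified illustration of the idea (a sketch only, not transformers' actual `_LazyModule` implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, item: str):
        if item not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        # The real import only happens here, on first access.
        module = importlib.import_module("." + self._symbol_to_module[item], self.__name__)
        return getattr(module, item)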
8
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Dict ): snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Dict = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) ) @slow def a__ ( self :Union[str, Any] ): snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Any = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
8
1
'''simple docstring'''

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class __UpperCamelCase ( lowercase__ ):
    lowercase : Tuple = 'philschmid/bart-large-cnn-samsum'
    lowercase : Tuple = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    lowercase : Union[str, Any] = 'summarizer'
    lowercase : List[str] = AutoTokenizer
    lowercase : List[Any] = AutoModelForSeqaSeqLM

    lowercase : Union[str, Any] = ['text']
    lowercase : Tuple = ['text']

    def a__ ( self :str ,_UpperCamelCase :Optional[int] ):
        return self.pre_processor(_UpperCamelCase ,return_tensors="""pt""" ,truncation=_UpperCamelCase )

    def a__ ( self :Optional[Any] ,_UpperCamelCase :str ):
        return self.model.generate(**_UpperCamelCase )[0]

    def a__ ( self :str ,_UpperCamelCase :str ):
        return self.pre_processor.decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
8
'''simple docstring'''

from __future__ import annotations

from collections.abc import Callable


def UpperCAmelCase ( lowerCamelCase_ :Callable[[int | float], int | float] , lowerCamelCase_ :int | float , lowerCamelCase_ :int | float , lowerCamelCase_ :int = 1_00 , ):
    '''simple docstring'''
    snake_case_ : Tuple = x_start
    snake_case_ : Optional[int] = fnc(lowerCamelCase_ )
    snake_case_ : Optional[int] = 0.0
    for _ in range(lowerCamelCase_ ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        snake_case_ : int = (x_end - x_start) / steps + xa
        snake_case_ : Union[str, Any] = fnc(lowerCamelCase_ )
        area += abs(fxa + fxa ) * (xa - xa) / 2
        # Increment step
        snake_case_ : Any = xa
        snake_case_ : str = fxa
    return area


if __name__ == "__main__":

    def UpperCAmelCase ( lowerCamelCase_ :Any ):
        '''simple docstring'''
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    __A : List[str] = 10
    while i <= 100_000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
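A readable version of the integrator above; the obfuscation appears to have collapsed the x1/x2 and fx1/fx2 pairs into identical names, so the descriptive names below are hypothetical reconstructions:

from collections.abc import Callable

def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1, fx1 = x_start, fnc(x_start)
    area = 0.0
    for _ in range(steps):
        x2 = (x_end - x_start) / steps + x1      # advance one step of uniform width
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2   # trapezoid over this segment
        x1, fx1 = x2, fx2
    return area

print(trapezoidal_area(lambda x: x * x, 0, 3, 1_000))  # ~9.0 (exact integral is 9)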
8
1
'''simple docstring'''

def UpperCAmelCase ( lowerCamelCase_ :int = 2_00 ):
    '''simple docstring'''
    snake_case_ : Dict = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
    snake_case_ : Optional[Any] = [0] * (pence + 1)
    snake_case_ : Optional[int] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(lowerCamelCase_ , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
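The same dynamic programme with readable (hypothetical) names; the renaming above appears to have dropped the `[0]` subscript on the base-case assignment, which the adjacent comment still records:

def count_ways(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: one way to make 0 pence (use no coins)
    for coin in coins:
        for amount in range(coin, pence + 1):
            number_of_ways[amount] += number_of_ways[amount - coin]
    return number_of_ways[pence]

assert count_ways(200) == 73_682  # matches the assertion in the file above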
8
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) __A : int = logging.getLogger() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) snake_case_ : int = parser.parse_args() return args.f def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[Any] = {} snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" ) if os.path.exists(lowerCamelCase_ ): with open(lowerCamelCase_ , """r""" ) as f: snake_case_ : str = json.load(lowerCamelCase_ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda""" return is_using_cuda and is_apex_available() __A : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __UpperCamelCase ( lowercase__ ): @classmethod def a__ ( cls :Dict ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU snake_case_ : Optional[int] = tempfile.mkdtemp() snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" ) write_basic_config(save_location=cls.configPath ) snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def a__ ( cls :int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Optional[int] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : Dict = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2 snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : str = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertLess(result["""train_loss"""] ,0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) ) @unittest.skip(reason="""Fix me @muellerzr""" ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[str] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : Optional[int] = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 ) self.assertGreaterEqual(result["""eval_exact"""] ,2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Union[str, Any] = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Union[str, Any] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[Any] = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : int = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 ) self.assertGreaterEqual(result["""eval_rouge2"""] ,2 ) self.assertGreaterEqual(result["""eval_rougeL"""] ,7 ) self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : Tuple = self.get_auto_remove_tmp_dir() snake_case_ : Optional[Any] = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Any = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) ) @slow def a__ ( self :Optional[Any] ): snake_case_ : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCamelCase ) snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Any ): snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
8
1
'''simple docstring''' import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Union[str, Any] = FlaxAutoencoderKL @property def a__ ( self :Optional[Any] ): snake_case_ : List[str] = 4 snake_case_ : Optional[int] = 3 snake_case_ : Optional[int] = (3_2, 3_2) snake_case_ : Union[str, Any] = jax.random.PRNGKey(0 ) snake_case_ : Optional[int] = jax.random.uniform(_UpperCamelCase ,((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def a__ ( self :Any ): snake_case_ : Optional[int] = { """block_out_channels""": [3_2, 6_4], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } snake_case_ : Tuple = self.dummy_input return init_dict, inputs_dict
8
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A : Tuple = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : str = ['input_values', 'padding_mask'] def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,): super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = chunk_length_s snake_case_ : str = overlap @property def a__ ( self :Any ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def a__ ( self :List[str] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. 
Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs snake_case_ : Tuple = True snake_case_ : str = bool( isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ): snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa ) elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): snake_case_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_UpperCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) snake_case_ : Tuple = None snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio ) snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) ) snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: snake_case_ : Any = max(array.shape[0] for array in raw_audio ) snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) ) snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length snake_case_ : Union[str, Any] = """max_length""" else: snake_case_ : int = input_values # normal padding on batch if padded_inputs is None: snake_case_ : Optional[int] = self.pad( _UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,) if padding: snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" ) snake_case_ : Optional[int] = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: snake_case_ : Dict = example[..., None] input_values.append(example.T ) snake_case_ : List[Any] = input_values if return_tensors is not None: snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase ) return padded_inputs
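The chunking arithmetic in the feature extractor above can be checked by hand; a small sketch with concrete, purely illustrative numbers:

import numpy as np

sampling_rate, chunk_length_s, overlap = 24_000, 1.0, 0.25   # illustrative values
chunk_length = int(chunk_length_s * sampling_rate)           # 24_000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))   # hop of 18_000 samples

audio_len = 60_000                                           # longest example in the batch
nb_step = int(np.ceil(audio_len / chunk_stride))             # padding path uses ceil -> 4
padded_len = (nb_step - 1) * chunk_stride + chunk_length     # 78_000 samples after padding
print(chunk_length, chunk_stride, padded_len)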
8
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__A : Optional[Any] = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Optional[int] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinva import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinvaForImageClassification,
            SwinvaForMaskedImageModeling,
            SwinvaModel,
            SwinvaPreTrainedModel,
        )

else:
    import sys

    __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __A : Dict = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = 'ernie_m' lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self :Optional[Any] ,_UpperCamelCase :int = 2_5_0_0_0_2 ,_UpperCamelCase :int = 7_6_8 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 3_0_7_2 ,_UpperCamelCase :str = "gelu" ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :int = 5_1_4 ,_UpperCamelCase :float = 0.02 ,_UpperCamelCase :int = 1 ,_UpperCamelCase :float = 1E-0_5 ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :List[str]=False ,_UpperCamelCase :Optional[int]=0.0 ,**_UpperCamelCase :List[Any] ,): super().__init__(pad_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Optional[int] = vocab_size snake_case_ : Any = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Any = hidden_act snake_case_ : Tuple = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : int = initializer_range snake_case_ : Optional[Any] = layer_norm_eps snake_case_ : Union[str, Any] = classifier_dropout snake_case_ : Tuple = is_decoder snake_case_ : int = act_dropout
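The config class above mirrors upstream transformers' `ErnieMConfig` (that name is an assumption here, since the class is obfuscated). A hedged usage sketch:

from transformers import ErnieMConfig  # assumed upstream counterpart of the class above

config = ErnieMConfig()  # defaults per the signature above: vocab_size=250002, hidden_size=768
tiny = ErnieMConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
# attribute_map aliases legacy names, e.g. config.num_classes resolves to num_labels
print(tiny.num_hidden_layers)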
8
1
'''simple docstring''' import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __UpperCamelCase ( unittest.TestCase ): lowercase : Tuple = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def a__ ( self :List[str] ,_UpperCamelCase :Dict ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ): snake_case_ : Optional[int] = hf_hub_download( repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" ) snake_case_ : Any = VideoClassificationPipeline(model=_UpperCamelCase ,image_processor=_UpperCamelCase ,top_k=2 ) snake_case_ : Dict = [ example_video_filepath, """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""", ] return video_classifier, examples def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any] ): for example in examples: snake_case_ : int = video_classifier(_UpperCamelCase ) self.assertEqual( _UpperCamelCase ,[ {"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )}, {"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )}, ] ,) @require_torch def a__ ( self :Union[str, Any] ): snake_case_ : str = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification""" snake_case_ : Tuple = VideoMAEFeatureExtractor( size={"""shortest_edge""": 1_0} ,crop_size={"""height""": 1_0, """width""": 1_0} ) snake_case_ : Union[str, Any] = pipeline( """video-classification""" ,model=_UpperCamelCase ,feature_extractor=_UpperCamelCase ,frame_sampling_rate=4 ) snake_case_ : int = hf_hub_download(repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" ) snake_case_ : List[str] = video_classifier(_UpperCamelCase ,top_k=2 ) self.assertEqual( nested_simplify(_UpperCamelCase ,decimals=4 ) ,[{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}] ,) snake_case_ : Optional[Any] = video_classifier( [ video_file_path, video_file_path, ] ,top_k=2 ,) self.assertEqual( nested_simplify(_UpperCamelCase ,decimals=4 ) ,[ [{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}], [{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}], ] ,) @require_tf def a__ ( self :str ): pass
8
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __UpperCamelCase ( nn.Module ): def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Any = only_cross_attention snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase ) else: snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. snake_case_ : str = ( AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,) # is self-attn if encoder_hidden_states is none else: snake_case_ : Any = None snake_case_ : Optional[Any] = None # 3. 
Feed-forward snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase ) # let chunk size default to None snake_case_ : Optional[int] = None snake_case_ : Dict = 0 def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ): # Sets chunk feed-forward snake_case_ : Optional[Any] = chunk_size snake_case_ : Optional[Any] = dim def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,): # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype ) else: snake_case_ : Optional[int] = self.norma(_UpperCamelCase ) snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} snake_case_ : Union[str, Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output snake_case_ : Union[str, Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: snake_case_ : Any = ( self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase ) ) snake_case_ : List[Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Tuple = attn_output + hidden_states # 3. Feed-forward snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size snake_case_ : int = torch.cat( [self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,) else: snake_case_ : List[str] = self.ff(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output snake_case_ : Any = ff_output + hidden_states return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Tuple = int(dim * mult ) snake_case_ : Optional[int] = dim_out if dim_out is not None else dim if activation_fn == "gelu": snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase ) if activation_fn == "gelu-approximate": snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" ) elif activation_fn == "geglu": snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase ) elif activation_fn == "geglu-approximate": snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Dict = nn.ModuleList([] ) # project in self.net.append(_UpperCamelCase ) # project dropout self.net.append(nn.Dropout(_UpperCamelCase ) ) # project out self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(_UpperCamelCase ) ) def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ): for module in self.net: snake_case_ : Tuple = module(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ): super().__init__() snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Optional[Any] = approximate def a__ ( self :str ,_UpperCamelCase :int ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ,approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ): snake_case_ : Optional[Any] = self.proj(_UpperCamelCase ) snake_case_ : int = self.gelu(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 ) def a__ ( self :Dict ,_UpperCamelCase :List[str] ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ , snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 ) return hidden_states * self.gelu(_UpperCamelCase ) class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ): snake_case_ : 
int = self.proj(_UpperCamelCase ) return x * torch.sigmoid(1.7_02 * x ) class __UpperCamelCase ( nn.Module ): def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ): super().__init__() snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Union[str, Any] = nn.SiLU() snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 ) snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) ) snake_case_ , snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 ) snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift return x class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : int = nn.SiLU() snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase ) snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 ) def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = emb.chunk(6 ,dim=1 ) snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ): super().__init__() snake_case_ : Optional[int] = num_groups snake_case_ : List[Any] = eps if act_fn is None: snake_case_ : int = None else: snake_case_ : Dict = get_activation(_UpperCamelCase ) snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 ) def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ): if self.act: snake_case_ : Any = self.act(_UpperCamelCase ) snake_case_ : Optional[int] = self.linear(_UpperCamelCase ) snake_case_ : Dict = emb[:, :, None, None] snake_case_ , snake_case_ : str = emb.chunk(2 ,dim=1 ) snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps ) snake_case_ : List[str] = x * (1 + scale) + shift return x
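A standalone sketch of the chunked feed-forward trick used in the transformer block above: the sequence dimension is split into chunks so that only one chunk's activations are materialised at a time. Names here are illustrative, not the block's own:

import torch

def chunked_feed_forward(ff, hidden: torch.Tensor, chunk_size: int, dim: int = 1) -> torch.Tensor:
    if hidden.shape[dim] % chunk_size != 0:
        raise ValueError("sequence length must be divisible by chunk_size")
    num_chunks = hidden.shape[dim] // chunk_size
    return torch.cat([ff(chunk) for chunk in hidden.chunk(num_chunks, dim=dim)], dim=dim)

ff = torch.nn.Linear(8, 8)  # stand-in for the FeedForward module
out = chunked_feed_forward(ff, torch.randn(2, 16, 8), chunk_size=4)
print(out.shape)  # torch.Size([2, 16, 8])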
8
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Tuple ): snake_case_ : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) snake_case_ : Optional[Any] = tf.convert_to_tensor( [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] ,dtype=tf.intaa ,) # J'aime le camembert !" snake_case_ : Dict = model(_UpperCamelCase )["""last_hidden_state"""] snake_case_ : List[str] = tf.TensorShape((1, 1_0, 7_6_8) ) self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice. snake_case_ : Any = tf.convert_to_tensor( [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] ,dtype=tf.floataa ,) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
8
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ): '''simple docstring''' snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} snake_case_ : Union[str, Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ): '''simple docstring''' snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,): super().__init__() snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" ) snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" ) snake_case_ : Optional[int] = self.get_char_lens(self.src_file ) snake_case_ : List[str] = max_source_length snake_case_ : str = max_target_length assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}''' snake_case_ : str = tokenizer snake_case_ : str = prefix if n_obs is not None: snake_case_ : int = self.src_lens[:n_obs] snake_case_ : Tuple = src_lang snake_case_ : str = tgt_lang def __len__( self :Any ): return len(self.src_lens ) def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ): snake_case_ : Optional[int] = index + 1 # linecache starts at 1 snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" ) snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" ) assert source_line, F'''empty source line for index {index}''' assert tgt_line, F'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right snake_case_ : int = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer ) snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" ) snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" ) snake_case_ : int = 
source_inputs["""input_ids"""].squeeze() snake_case_ : str = target_inputs["""input_ids"""].squeeze() snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( _UpperCamelCase :str ): return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()] def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ): snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] ) snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] ) snake_case_ : Optional[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase ) snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase ) snake_case_ : Optional[int] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __A : List[Any] = getLogger(__name__) def UpperCAmelCase ( lowerCamelCase_ :List[List] ): '''simple docstring''' return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : int = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): '''simple docstring''' with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) snake_case_ : List[str] = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ): '''simple docstring''' return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Dict ): '''simple docstring''' def remove_articles(lowerCamelCase_ :str ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ :Optional[Any] ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ :Tuple ): snake_case_ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ :Optional[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' 
snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) snake_case_ : Optional[Any] = sum(common.values() ) if num_same == 0: return 0 snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ): '''simple docstring''' assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) snake_case_ : Optional[int] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead snake_case_ : Optional[int] = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue snake_case_ : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
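A worked sketch of the token-overlap F1 above with readable names; the real helper normalises both strings first (lowercasing, stripping articles and punctuation), which is omitted here:

from collections import Counter

def f1_score(prediction: str, ground_truth: str) -> float:
    pred_tokens = prediction.split()
    gold_tokens = ground_truth.split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

print(f1_score("cat sat", "the cat sat down"))  # 0.666...: precision=1.0, recall=0.5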
8
1
'''simple docstring'''

def UpperCAmelCase ( lowerCamelCase_ :list ):
    '''simple docstring'''
    if len(lowerCamelCase_ ) <= 1:
        return lst

    snake_case_ : Union[str, Any] = 1

    while i < len(lowerCamelCase_ ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            snake_case_ , snake_case_ : Union[str, Any] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                snake_case_ : int = 1

    return lst


if __name__ == "__main__":
    __A : Optional[int] = input('Enter numbers separated by a comma:\n').strip()
    __A : int = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
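A de-obfuscated sketch of the gnome sort above (the tuple swap lost its left-hand side to the renaming; restored here with hypothetical names):

def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1  # in order: step forward
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]  # swap and step back
            i -= 1
            if i == 0:
                i = 1
    return lst

assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]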
8
'''simple docstring'''

import functools


def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ):
    '''simple docstring'''
    snake_case_ : List[str] = len(lowerCamelCase_ )
    snake_case_ : Dict = len(lowerCamelCase_ )

    @functools.cache
    def min_distance(lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_worda - indexa
        # if second word index is overflow - delete all from the first word
        if indexa >= len_worda:
            return len_worda - indexa
        snake_case_ : Union[str, Any] = int(worda[indexa] != worda[indexa] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , lowerCamelCase_ ) ,
            1 + min_distance(lowerCamelCase_ , indexa + 1 ) ,
            diff + min_distance(indexa + 1 , indexa + 1 ) ,
        )

    return min_distance(0 , 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
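The memoised edit-distance recursion above, restated with readable (hypothetical) names; `functools.cache` memoises on the index pair, giving the usual O(len1 * len2) behaviour:

import functools

def edit_distance(word1: str, word2: str) -> int:
    len1, len2 = len(word1), len(word2)

    @functools.cache
    def min_distance(i: int, j: int) -> int:
        if i >= len1:  # word1 exhausted: insert the rest of word2
            return len2 - j
        if j >= len2:  # word2 exhausted: delete the rest of word1
            return len1 - i
        diff = int(word1[i] != word2[j])
        return min(
            1 + min_distance(i + 1, j),        # delete from word1
            1 + min_distance(i, j + 1),        # insert into word1
            diff + min_distance(i + 1, j + 1)  # match / substitute
        )

    return min_distance(0, 0)

assert edit_distance("kitten", "sitting") == 3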
8
1
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : int = 16 __A : Union[str, Any] = 32 def UpperCAmelCase ( lowerCamelCase_ :Accelerator , lowerCamelCase_ :int = 16 ): '''simple docstring''' snake_case_ : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" ) snake_case_ : List[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowerCamelCase_ :Any ): # max_length=None => use the model max length (it's actually the default) snake_case_ : Dict = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): snake_case_ : str = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ : Dict = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase_ :Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. snake_case_ : str = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": snake_case_ : str = 16 elif accelerator.mixed_precision != "no": snake_case_ : List[Any] = 8 else: snake_case_ : Tuple = None return tokenizer.pad( lowerCamelCase_ , padding="""longest""" , max_length=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
snake_case_ : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) snake_case_ : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : int = mocked_dataloaders # noqa: F811 def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[Any] ): '''simple docstring''' # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCamelCase_ ) == "1": snake_case_ : List[Any] = 2 # Initialize accelerator snake_case_ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : Optional[Any] = config["""lr"""] snake_case_ : List[str] = int(config["""num_epochs"""] ) snake_case_ : Optional[Any] = int(config["""seed"""] ) snake_case_ : int = int(config["""batch_size"""] ) snake_case_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCamelCase_ ) def inner_training_loop(lowerCamelCase_ :Dict ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCamelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case_ : List[Any] = model.to(accelerator.device ) # Instantiate optimizer snake_case_ : int = AdamW(params=model.parameters() , lr=lowerCamelCase_ ) snake_case_ , snake_case_ : Union[str, Any] = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ ) # Instantiate scheduler snake_case_ : int = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCamelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Now we train the model for epoch in range(lowerCamelCase_ ): model.train() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) snake_case_ : List[Any] = model(**lowerCamelCase_ ) snake_case_ : Dict = outputs.loss accelerator.backward(lowerCamelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case_ : str = model(**lowerCamelCase_ ) snake_case_ : int = outputs.logits.argmax(dim=-1 ) snake_case_ , snake_case_ : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCamelCase_ , references=lowerCamelCase_ , ) snake_case_ : Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowerCamelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCamelCase_ , default=lowerCamelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) snake_case_ : Optional[Any] = parser.parse_args() snake_case_ : int = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
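# A minimal standalone sketch (not part of the script above) of what
# `find_executable_batch_size` contributes: it retries the decorated function,
# halving `batch_size`, whenever a CUDA out-of-memory RuntimeError escapes it.
# The toy `train` function and its OOM threshold of 32 are hypothetical.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 32:
        # Simulate the error the real training loop would hit on a small GPU.
        raise RuntimeError("CUDA out of memory.")
    return batch_size


assert train() == 32  # 128 -> 64 -> 32 after two simulated OOM retries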
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Any = tmp_path / """file.csv""" snake_case_ : Any = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[int] = tmp_path / """malformed_file.csv""" snake_case_ : int = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : str = tmp_path / """csv_with_image.csv""" snake_case_ : int = textwrap.dedent( F'''\ image {image_file} ''' ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : int = tmp_path / """csv_with_label.csv""" snake_case_ : Tuple = textwrap.dedent( """\ label good bad good """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv""" snake_case_ : str = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ): '''simple docstring''' snake_case_ : int = Csv() snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(lowerCamelCase_ ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase ( lowerCamelCase_ :Tuple ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : Tuple = f.read().splitlines()[1] snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] ) snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() snake_case_ : List[str] = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : List[Any] = f.read().splitlines()[1:] snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] ) snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() snake_case_ : 
Union[str, Any] = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels] def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] ) snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) snake_case_ : Dict = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
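# The tests above drive the packaged `Csv` builder directly; a hedged sketch of
# the equivalent public API, with the same ClassLabel cast. The file name
# "labels.csv" is a placeholder, not a fixture from this test module.
from datasets import ClassLabel, Features, load_dataset

features = Features({"label": ClassLabel(names=["good", "bad"])})
dataset = load_dataset("csv", data_files="labels.csv", features=features, split="train")
print(dataset.features["label"].int2str(dataset[0]["label"]))  # e.g. "good"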
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __A : Any = logging.get_logger(__name__) if is_vision_available(): import PIL class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = ['pixel_values'] def __init__( self :Optional[Any] ,_UpperCamelCase :bool = True ,_UpperCamelCase :Dict[str, int] = None ,_UpperCamelCase :PILImageResampling = PILImageResampling.BICUBIC ,_UpperCamelCase :bool = True ,_UpperCamelCase :Dict[str, int] = None ,_UpperCamelCase :bool = True ,_UpperCamelCase :Union[int, float] = 1 / 2_5_5 ,_UpperCamelCase :bool = True ,_UpperCamelCase :Optional[Union[float, List[float]]] = None ,_UpperCamelCase :Optional[Union[float, List[float]]] = None ,_UpperCamelCase :bool = True ,**_UpperCamelCase :Optional[int] ,): super().__init__(**_UpperCamelCase ) snake_case_ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_2_4} snake_case_ : Dict = get_size_dict(_UpperCamelCase ,default_to_square=_UpperCamelCase ) snake_case_ : int = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4} snake_case_ : Union[str, Any] = get_size_dict(_UpperCamelCase ,default_to_square=_UpperCamelCase ,param_name="""crop_size""" ) snake_case_ : Union[str, Any] = do_resize snake_case_ : Optional[Any] = size snake_case_ : List[str] = resample snake_case_ : Tuple = do_center_crop snake_case_ : str = crop_size snake_case_ : Union[str, Any] = do_rescale snake_case_ : int = rescale_factor snake_case_ : Optional[int] = do_normalize snake_case_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case_ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD snake_case_ : Any = do_convert_rgb def a__ ( self :List[str] ,_UpperCamelCase :np.ndarray ,_UpperCamelCase :Dict[str, int] ,_UpperCamelCase :PILImageResampling = PILImageResampling.BICUBIC ,_UpperCamelCase :Optional[Union[str, ChannelDimension]] = None ,**_UpperCamelCase :Union[str, Any] ,): snake_case_ : List[str] = get_size_dict(_UpperCamelCase ,default_to_square=_UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) snake_case_ : Optional[Any] = get_resize_output_image_size(_UpperCamelCase ,size=size["""shortest_edge"""] ,default_to_square=_UpperCamelCase ) return resize(_UpperCamelCase ,size=_UpperCamelCase ,resample=_UpperCamelCase ,data_format=_UpperCamelCase ,**_UpperCamelCase ) def a__ ( self :Any ,_UpperCamelCase :np.ndarray ,_UpperCamelCase :Dict[str, int] ,_UpperCamelCase :Optional[Union[str, ChannelDimension]] = None ,**_UpperCamelCase :List[str] ,): snake_case_ : Any = get_size_dict(_UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_UpperCamelCase ,size=(size["""height"""], size["""width"""]) ,data_format=_UpperCamelCase ,**_UpperCamelCase ) def a__ ( self :List[Any] ,_UpperCamelCase :np.ndarray ,_UpperCamelCase :Union[int, float] ,_UpperCamelCase :Optional[Union[str, ChannelDimension]] = None ,**_UpperCamelCase :List[Any] ,): return rescale(_UpperCamelCase ,scale=_UpperCamelCase ,data_format=_UpperCamelCase ,**_UpperCamelCase ) def a__ ( self :List[str] ,_UpperCamelCase :np.ndarray ,_UpperCamelCase :Union[float, List[float]] ,_UpperCamelCase :Union[float, List[float]] ,_UpperCamelCase :Optional[Union[str, ChannelDimension]] = None ,**_UpperCamelCase :List[str] ,): return normalize(_UpperCamelCase ,mean=_UpperCamelCase ,std=_UpperCamelCase ,data_format=_UpperCamelCase ,**_UpperCamelCase ) def a__ ( self :Any ,_UpperCamelCase :ImageInput ,_UpperCamelCase :bool = None ,_UpperCamelCase :Dict[str, int] = None ,_UpperCamelCase :PILImageResampling = None ,_UpperCamelCase :bool = None ,_UpperCamelCase :int = None ,_UpperCamelCase :bool = None ,_UpperCamelCase :float = None ,_UpperCamelCase :bool = None ,_UpperCamelCase :Optional[Union[float, List[float]]] = None ,_UpperCamelCase :Optional[Union[float, List[float]]] = None ,_UpperCamelCase :bool = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[ChannelDimension] = ChannelDimension.FIRST ,**_UpperCamelCase :Any ,): snake_case_ : Dict = do_resize if do_resize is not None else self.do_resize snake_case_ : Tuple = size if size is not None else self.size snake_case_ : str = get_size_dict(_UpperCamelCase ,param_name="""size""" ,default_to_square=_UpperCamelCase ) snake_case_ : List[str] = resample if resample is not None else self.resample snake_case_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : Optional[int] = crop_size if crop_size is not None else self.crop_size snake_case_ : int = get_size_dict(_UpperCamelCase ,param_name="""crop_size""" ,default_to_square=_UpperCamelCase ) snake_case_ : int = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean snake_case_ : Optional[Any] = image_std if image_std is not None else self.image_std snake_case_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ : List[str] = make_list_of_images(_UpperCamelCase ) if not valid_images(_UpperCamelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ : Tuple = [convert_to_rgb(_UpperCamelCase ) for image in images] # All transformations expect numpy arrays. 
snake_case_ : List[str] = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: snake_case_ : List[Any] = [self.resize(image=_UpperCamelCase ,size=_UpperCamelCase ,resample=_UpperCamelCase ) for image in images] if do_center_crop: snake_case_ : List[str] = [self.center_crop(image=_UpperCamelCase ,size=_UpperCamelCase ) for image in images] if do_rescale: snake_case_ : List[Any] = [self.rescale(image=_UpperCamelCase ,scale=_UpperCamelCase ) for image in images] if do_normalize: snake_case_ : Dict = [self.normalize(image=_UpperCamelCase ,mean=_UpperCamelCase ,std=_UpperCamelCase ) for image in images] snake_case_ : Any = [to_channel_dimension_format(_UpperCamelCase ,_UpperCamelCase ) for image in images] snake_case_ : Any = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase ,tensor_type=_UpperCamelCase )
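# The preprocessing pipeline above follows the CLIP recipe (resize the shortest
# edge, center-crop, rescale by 1/255, normalize with the OpenAI CLIP mean/std,
# convert to RGB). A usage sketch with the public CLIP image processor, assuming
# the obfuscated class here mirrors it; the random image stands in for real data.
import numpy as np
from transformers import CLIPImageProcessor

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
processor = CLIPImageProcessor()
inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224)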
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ): '''simple docstring''' # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : Optional[Any] = np.asarray(weights[0] ) snake_case_ : int = np.asarray(weights[1] ) snake_case_ : Any = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : List[Any] = np.asarray(weights[0] ) snake_case_ : Optional[int] = np.asarray(weights[1] ) snake_case_ : Union[str, Any] = np.asarray(weights[2] ) snake_case_ : int = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' # layernorm 1 snake_case_ : str = weights[0][0][0] snake_case_ : int = np.asarray(layer_norm_a[0] ) snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # lsh weights + output snake_case_ : Tuple = weights[0][1] if len(lowerCamelCase_ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) else: set_layer_weights_in_torch_local(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) # intermediate weighs snake_case_ : str = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase_ ) == 4: snake_case_ : List[Any] = intermediate_weights[2] # layernorm 2 snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] ) snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # intermediate dense snake_case_ : Any = 
np.asarray(intermediate_weights[1][0] ) snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) # intermediate out snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] ) snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ): '''simple docstring''' # reformer model snake_case_ : Dict = torch_model.reformer # word embeds snake_case_ : List[Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase_ ) , ) if isinstance(weights[3] , lowerCamelCase_ ): snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) ) snake_case_ : List[Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # output layer norm snake_case_ : Optional[Any] = np.asarray(weights[7][0] ) snake_case_ : List[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # output embeddings snake_case_ : Optional[int] = np.asarray(weights[9][0] ) snake_case_ : Any = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # Initialise PyTorch model snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ ) with open(lowerCamelCase_ , """rb""" ) as f: snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""] set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCamelCase_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) __A : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
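# A minimal illustration (standalone, with made-up shapes) of what the `set_param`
# helper at the top of this script does: copy a weight (and optional bias) into an
# existing torch layer after asserting that the shapes line up.
import torch
from torch import nn

layer = nn.Linear(4, 3)
weight = torch.randn(3, 4)
bias = torch.randn(3)
assert layer.weight.shape == weight.shape and layer.bias.shape == bias.shape
layer.weight = nn.Parameter(weight)
layer.bias = nn.Parameter(bias)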
'''simple docstring'''
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the identity log10(x^y) = y * log10(x) to compare huge powers
        # without computing them.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two (base, power) pairs from input and typecast them to int using map.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number using res(), which takes two arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
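# Worked example of the identity used above: which is larger, 2**1000 or 10**300?
# Comparing y * log10(x) avoids materializing the huge powers.
import math

res_a = 1000 * math.log10(2)  # ~301.03
res_b = 300 * math.log10(10)  # 300.0
assert res_a > res_b  # hence 2**1000 > 10**300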
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class __UpperCamelCase ( lowercase__ ): lowercase : List[Any] = 'canine' def __init__( self :Optional[int] ,_UpperCamelCase :Dict=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :int=1_2 ,_UpperCamelCase :int=3_0_7_2 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=1_6_3_8_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :List[str]=0.02 ,_UpperCamelCase :Any=1E-1_2 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :List[str]=0xE_0_0_0 ,_UpperCamelCase :Optional[Any]=0xE_0_0_1 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :str=8 ,_UpperCamelCase :int=1_6_3_8_4 ,_UpperCamelCase :int=1_2_8 ,**_UpperCamelCase :str ,): super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : List[str] = max_position_embeddings snake_case_ : Union[str, Any] = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : Tuple = intermediate_size snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Optional[Any] = initializer_range snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[str] = layer_norm_eps # Character config: snake_case_ : Any = downsampling_rate snake_case_ : List[str] = upsampling_kernel_size snake_case_ : int = num_hash_functions snake_case_ : Tuple = num_hash_buckets snake_case_ : Tuple = local_transformer_stride
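# A short sketch instantiating the configuration above; `CanineModel(config)` gives
# randomly initialized weights, while `from_pretrained("google/canine-s")` would load
# the checkpoint referenced in the archive map.
from transformers import CanineConfig, CanineModel

config = CanineConfig()
model = CanineModel(config)
print(config.downsampling_rate, config.num_hash_buckets)  # 4 16384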
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __A : Optional[Any] = logging.get_logger(__name__) def UpperCAmelCase ( lowerCamelCase_ :List[str] ): '''simple docstring''' snake_case_ : List[Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: snake_case_ : int = 1_28 elif "12-12" in model_name: snake_case_ : Any = 12 snake_case_ : List[str] = 12 elif "14-14" in model_name: snake_case_ : Union[str, Any] = 14 snake_case_ : Dict = 14 elif "16-16" in model_name: snake_case_ : Tuple = 16 snake_case_ : Optional[int] = 16 else: raise ValueError("""Model not supported""" ) snake_case_ : Optional[int] = """huggingface/label-files""" if "speech-commands" in model_name: snake_case_ : List[str] = 35 snake_case_ : Any = """speech-commands-v2-id2label.json""" else: snake_case_ : Tuple = 5_27 snake_case_ : List[Any] = """audioset-id2label.json""" snake_case_ : str = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) snake_case_ : Optional[int] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} snake_case_ : Optional[int] = idalabel snake_case_ : Tuple = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if "module.v" in name: snake_case_ : List[str] = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: snake_case_ : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: snake_case_ : Optional[Any] = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: snake_case_ : List[str] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: snake_case_ : Optional[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: snake_case_ : Dict = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: snake_case_ : Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: snake_case_ : Optional[Any] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: snake_case_ : int = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: snake_case_ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: snake_case_ : Tuple = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: snake_case_ : int = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: snake_case_ : List[Any] = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: snake_case_ : str = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: snake_case_ : Tuple = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' for key in orig_state_dict.copy().keys(): snake_case_ : List[Any] = 
orig_state_dict.pop(lowerCamelCase_ ) if "qkv" in key: snake_case_ : Dict = key.split(""".""" ) snake_case_ : Optional[Any] = int(key_split[3] ) snake_case_ : int = config.hidden_size if "weight" in key: snake_case_ : Optional[int] = val[:dim, :] snake_case_ : int = val[dim : dim * 2, :] snake_case_ : Union[str, Any] = val[-dim:, :] else: snake_case_ : List[str] = val[:dim] snake_case_ : int = val[dim : dim * 2] snake_case_ : Optional[int] = val[-dim:] else: snake_case_ : Optional[int] = val return orig_state_dict def UpperCAmelCase ( lowerCamelCase_ :List[str] ): '''simple docstring''' snake_case_ : str = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase_ , lowerCamelCase_ ) @torch.no_grad() def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int=False ): '''simple docstring''' snake_case_ : Tuple = get_audio_spectrogram_transformer_config(lowerCamelCase_ ) snake_case_ : Optional[Any] = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict snake_case_ : List[Any] = model_name_to_url[model_name] snake_case_ : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" ) # remove some keys remove_keys(lowerCamelCase_ ) # rename some keys snake_case_ : Optional[int] = convert_state_dict(lowerCamelCase_ , lowerCamelCase_ ) # load 🤗 model snake_case_ : Optional[int] = ASTForAudioClassification(lowerCamelCase_ ) model.eval() model.load_state_dict(lowerCamelCase_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 snake_case_ : str = -4.2_677_393 if """speech-commands""" not in model_name else -6.845_978 snake_case_ : Any = 4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526 snake_case_ : Union[str, Any] = 10_24 if """speech-commands""" not in model_name else 1_28 snake_case_ : int = ASTFeatureExtractor(mean=lowerCamelCase_ , std=lowerCamelCase_ , max_length=lowerCamelCase_ ) if "speech-commands" in model_name: snake_case_ : str = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) snake_case_ : List[Any] = dataset[0]["""audio"""]["""array"""] else: snake_case_ : Dict = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) snake_case_ , snake_case_ : Optional[int] = torchaudio.load(lowerCamelCase_ ) 
snake_case_ : Optional[int] = waveform.squeeze().numpy() snake_case_ : List[Any] = feature_extractor(lowerCamelCase_ , sampling_rate=1_60_00 , return_tensors="""pt""" ) # forward pass snake_case_ : Optional[Any] = model(**lowerCamelCase_ ) snake_case_ : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": snake_case_ : List[Any] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": snake_case_ : int = torch.tensor([-1.1_986, -7.0_903, -8.2_718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": snake_case_ : Dict = torch.tensor([-2.6_128, -8.0_080, -9.4_344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": snake_case_ : str = torch.tensor([-1.5_080, -7.4_534, -8.8_917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": snake_case_ : Optional[Any] = torch.tensor([-0.5_050, -6.5_833, -8.0_843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": snake_case_ : int = torch.tensor([-0.3_826, -7.0_336, -8.2_413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": snake_case_ : List[str] = torch.tensor([-1.2_113, -6.9_101, -8.3_470] ) elif model_name == "ast-finetuned-speech-commands-v2": snake_case_ : Optional[Any] = torch.tensor([6.1_589, -8.0_566, -8.7_984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1E-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __A : int = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
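# A hedged end-to-end check for a converted checkpoint: classify one second of
# silence. The hub name assumes the conversion above was pushed under MIT/, as the
# script's --push_to_hub branch does.
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

name = "MIT/ast-finetuned-audioset-10-10-0.4593"
extractor = ASTFeatureExtractor.from_pretrained(name)
model = ASTForAudioClassification.from_pretrained(name)
inputs = extractor([0.0] * 16_000, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])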
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Optional[Any] = { 'facebook/blenderbot_small-90M': 512, } class __UpperCamelCase ( lowercase__ ): lowercase : str = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = BlenderbotSmallTokenizer def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,): super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = add_prefix_space def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ): snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : int = [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
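# Quick sanity check of the fast tokenizer above; the sample sentence is arbitrary.
# Note that `create_token_type_ids_from_sequences` (the second method) always returns
# zeros, since Blenderbot Small does not use token type ids.
from transformers import BlenderbotSmallTokenizerFast

tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
encoded = tokenizer("hello, how are you?")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))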
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: __A : Any = None __A : List[Any] = logging.get_logger(__name__) __A : Any = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __A : List[Any] = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', }, 'tokenizer_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json', }, } __A : Optional[int] = { 'albert-base-v1': 512, 'albert-large-v1': 512, 'albert-xlarge-v1': 512, 'albert-xxlarge-v1': 512, 'albert-base-v2': 512, 'albert-large-v2': 512, 'albert-xlarge-v2': 512, 'albert-xxlarge-v2': 512, } __A : List[str] = '▁' class __UpperCamelCase ( lowercase__ ): lowercase : Tuple = VOCAB_FILES_NAMES lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Optional[Any] = AlbertTokenizer def __init__( self :Optional[int] ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Dict=None ,_UpperCamelCase :List[str]=True ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :int=False ,_UpperCamelCase :int="[CLS]" ,_UpperCamelCase :Optional[Any]="[SEP]" ,_UpperCamelCase :str="<unk>" ,_UpperCamelCase :int="[SEP]" ,_UpperCamelCase :Optional[int]="<pad>" ,_UpperCamelCase :Any="[CLS]" ,_UpperCamelCase :Union[str, Any]="[MASK]" ,**_UpperCamelCase :List[Any] ,): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
snake_case_ : List[str] = ( AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ,normalized=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token ) super().__init__( _UpperCamelCase ,tokenizer_file=_UpperCamelCase ,do_lower_case=_UpperCamelCase ,remove_space=_UpperCamelCase ,keep_accents=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Tuple = do_lower_case snake_case_ : str = remove_space snake_case_ : Dict = keep_accents snake_case_ : Optional[int] = vocab_file snake_case_ : Union[str, Any] = False if not self.vocab_file else True def a__ ( self :Dict ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Optional[Any] = [self.sep_token_id] snake_case_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self :Union[str, Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Optional[Any] = [self.sep_token_id] snake_case_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self :Tuple ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : Dict = os.path.join( _UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ): copyfile(self.vocab_file ,_UpperCamelCase ) return (out_vocab_file,)
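# Sketch of the sequence-pair layout the two helper methods above implement:
# `[CLS] A [SEP] B [SEP]`, with token type ids 0 for the first segment and 1 for
# the second.
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
enc = tokenizer("first sentence", "second sentence")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])  # zeros for segment A, ones for segment B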
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
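# Standalone example checks (not part of the original script). Gnome sort walks
# forward while adjacent items are ordered and steps back after each swap:
# O(n^2) worst case, O(n) on already-sorted input.
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert gnome_sort([]) == []
assert gnome_sort(["b", "a", "c"]) == ["a", "b", "c"]  # any comparable items work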
'''simple docstring'''
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product of any contiguous subarray of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
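# Standalone example checks (not part of the original file). A negative number
# swaps the roles of the running maximum and minimum products, which is why both
# are tracked.
assert max_product_subarray([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-2, -3, 4]) == 24  # (-2) * (-3) * 4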
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape 
,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y) with the forward Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
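# Example: integrate dy/dx = y with y(0) = 1 up to x = 1; the exact value is e.
# The explicit Euler method is first-order, so halving the step size roughly
# halves the global error.
approx = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=1e-3, x_end=1.0)
print(approx[-1])  # ~2.7169 vs. math.e ~ 2.7183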
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __UpperCamelCase : def __init__( self :Union[str, Any] ,_UpperCamelCase :str ,_UpperCamelCase :Union[str, Any]=1_3 ,_UpperCamelCase :List[Any]=3_2 ,_UpperCamelCase :Optional[Any]=2 ,_UpperCamelCase :Optional[Any]=3 ,_UpperCamelCase :Optional[int]=1_6 ,_UpperCamelCase :int=[1, 2, 1] ,_UpperCamelCase :Dict=[2, 2, 4] ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Tuple=2.0 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Optional[int]=0.0 ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Union[str, Any]="gelu" ,_UpperCamelCase :Optional[int]=False ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :Optional[Any]=0.02 ,_UpperCamelCase :Union[str, Any]=1E-5 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :List[str]=None ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :str=1_0 ,_UpperCamelCase :List[Any]=8 ,_UpperCamelCase :int=["stage1", "stage2", "stage3"] ,_UpperCamelCase :List[Any]=[1, 2, 3] ,): snake_case_ : Union[str, Any] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : Optional[int] = image_size snake_case_ : int = patch_size snake_case_ : List[str] = num_channels snake_case_ : Union[str, Any] = embed_dim snake_case_ : int = depths snake_case_ : int = num_heads snake_case_ : str = window_size snake_case_ : int = mlp_ratio snake_case_ : List[Any] = qkv_bias snake_case_ : int = hidden_dropout_prob snake_case_ : int = attention_probs_dropout_prob snake_case_ : Any = drop_path_rate snake_case_ : Optional[Any] = hidden_act snake_case_ : List[str] = use_absolute_embeddings snake_case_ : List[Any] = patch_norm snake_case_ : Optional[Any] = layer_norm_eps snake_case_ : List[str] = initializer_range snake_case_ : str = is_training snake_case_ : Dict = scope snake_case_ : Tuple = use_labels snake_case_ : Optional[int] = type_sequence_label_size snake_case_ : str = encoder_stride snake_case_ : str = out_features snake_case_ : List[Any] = out_indices def a__ ( self :List[str] ): snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : List[str] = None if self.use_labels: snake_case_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) snake_case_ : Tuple = self.get_config() return config, pixel_values, labels def a__ ( self :Dict ): return MaskFormerSwinConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm 
,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def a__ ( self :Optional[Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[str] ,_UpperCamelCase :Tuple ): snake_case_ : List[str] = MaskFormerSwinModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : str = model(_UpperCamelCase ) snake_case_ : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ : Optional[int] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Tuple ): snake_case_ : int = MaskFormerSwinBackbone(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : Dict = model(_UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,[1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(_UpperCamelCase ): snake_case_ : Optional[Any] = ["""stem"""] snake_case_ : Optional[int] = MaskFormerSwinBackbone(config=_UpperCamelCase ) def a__ ( self :int ): snake_case_ : Optional[Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs snake_case_ : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): lowercase : Any = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowercase : List[Any] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} lowercase : Any = False lowercase : Union[str, Any] = False lowercase : Optional[Any] = False lowercase : Optional[int] = False lowercase : List[str] = False def a__ ( self :Optional[Any] ): snake_case_ : str = MaskFormerSwinModelTester(self ) snake_case_ : List[str] = ConfigTester(self ,config_class=_UpperCamelCase ,embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def a__ ( self :List[Any] ): pass def a__ ( self :Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self :Optional[Any] ): return def a__ ( self :Dict ): snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :List[Any] ): snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCamelCase ) @unittest.skip("""Swin does not use inputs_embeds""" ) def a__ ( self :Union[str, Any] ): pass @unittest.skip("""Swin does not support 
feedforward chunking""" ) def a__ ( self :int ): pass def a__ ( self :Tuple ): snake_case_ , snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Any = model_class(_UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) snake_case_ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase ,nn.Linear ) ) def a__ ( self :Dict ): snake_case_ , snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Union[str, Any] = model_class(_UpperCamelCase ) snake_case_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : List[str] = [*signature.parameters.keys()] snake_case_ : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_UpperCamelCase ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def a__ ( self :Dict ): pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def a__ ( self :Union[str, Any] ): pass def a__ ( self :Any ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ): snake_case_ : Union[str, Any] = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() with torch.no_grad(): snake_case_ : Optional[Any] = model(**self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) ) snake_case_ : Dict = outputs.hidden_states snake_case_ : Union[str, Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCamelCase ) ,_UpperCamelCase ) # Swin has a different seq_length snake_case_ : str = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def a__ ( self :List[Any] ): snake_case_ , snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ : Optional[int] = True self.check_hidden_states_output(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : List[str] = True self.check_hidden_states_output(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :str ): snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = 3 snake_case_ : Optional[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ : List[str] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ : Tuple = image_size[1] + 
patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ : Any = True self.check_hidden_states_output(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : Optional[int] = True self.check_hidden_states_output(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,(padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def a__ ( self :Dict ): pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def a__ ( self :List[Any] ): pass def a__ ( self :str ): snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_UpperCamelCase :Optional[Any] ): snake_case_ : Optional[int] = 0 return t def check_equivalence(_UpperCamelCase :Dict ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any={} ): with torch.no_grad(): snake_case_ : Any = model(**_UpperCamelCase ,return_dict=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Any = model(**_UpperCamelCase ,return_dict=_UpperCamelCase ,**_UpperCamelCase ).to_tuple() def recursive_check(_UpperCamelCase :List[str] ,_UpperCamelCase :Any ): if isinstance(_UpperCamelCase ,(List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_UpperCamelCase ,_UpperCamelCase ): recursive_check(_UpperCamelCase ,_UpperCamelCase ) elif isinstance(_UpperCamelCase ,_UpperCamelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() ,dict_object.values() ): recursive_check(_UpperCamelCase ,_UpperCamelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_UpperCamelCase ) ,set_nan_tensor_to_zero(_UpperCamelCase ) ,atol=1E-5 ) ,msg=( """Tuple and dict output are not equal. Difference:""" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(_UpperCamelCase ).any()} and `inf`: {torch.isinf(_UpperCamelCase )}. 
Dict has''' F''' `nan`: {torch.isnan(_UpperCamelCase ).any()} and `inf`: {torch.isinf(_UpperCamelCase )}.''' ) ,) recursive_check(_UpperCamelCase ,_UpperCamelCase ) for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : Dict = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : List[str] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) check_equivalence(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) snake_case_ : str = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase ) snake_case_ : Optional[Any] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase ) check_equivalence(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Tuple = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : int = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) check_equivalence(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,{"""output_hidden_states""": True} ) snake_case_ : List[Any] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase ) snake_case_ : Optional[Any] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase ) check_equivalence(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,{"""output_hidden_states""": True} ) @require_torch class __UpperCamelCase ( unittest.TestCase , lowercase__ ): lowercase : List[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () lowercase : List[str] = MaskFormerSwinConfig def a__ ( self :Optional[Any] ): snake_case_ : Optional[Any] = MaskFormerSwinModelTester(self ) def a__ ( self :Optional[int] ): snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : List[str] = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: snake_case_ : Tuple = backbone_class(_UpperCamelCase ) backbone.to(_UpperCamelCase ) backbone.eval() snake_case_ : Union[str, Any] = backbone(**_UpperCamelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps ,_UpperCamelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ): self.assertEqual(feature_map.shape[:2] ,(batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case_ : List[str] = backbone(**_UpperCamelCase ,output_hidden_states=_UpperCamelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertEqual(len(outputs.hidden_states ) ,len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case_ , snake_case_ , snake_case_ : Optional[int] = hidden_state.shape self.assertEqual((h_batch_size, h_n_channels) ,(batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: snake_case_ : List[str] = backbone(**_UpperCamelCase ,output_attentions=_UpperCamelCase ) self.assertIsNotNone(outputs.attentions )
8
'''simple docstring''' import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __A : Optional[int] = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[str] ,*_UpperCamelCase :str ,**_UpperCamelCase :Optional[int] ): warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" ,_UpperCamelCase ,) super().__init__(*_UpperCamelCase ,**_UpperCamelCase )
8
1
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __UpperCamelCase ( lowercase__ ): def a__ ( self :List[str] ): snake_case_ : List[Any] = tempfile.mkdtemp() snake_case_ : Dict = 5 # Realm tok snake_case_ : Optional[int] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """test""", """question""", """this""", """is""", """the""", """first""", """second""", """third""", """fourth""", """fifth""", """record""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] snake_case_ : str = os.path.join(self.tmpdirname ,"""realm_tokenizer""" ) os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase ) snake_case_ : Optional[int] = os.path.join(_UpperCamelCase ,VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) snake_case_ : Tuple = os.path.join(self.tmpdirname ,"""realm_block_records""" ) os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase ) def a__ ( self :Union[str, Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""realm_tokenizer""" ) ) def a__ ( self :int ): shutil.rmtree(self.tmpdirname ) def a__ ( self :Tuple ): snake_case_ : List[str] = RealmConfig(num_block_records=self.num_block_records ) return config def a__ ( self :Tuple ): snake_case_ : Optional[int] = Dataset.from_dict( { """id""": ["""0""", """1"""], """question""": ["""foo""", """bar"""], """answers""": [["""Foo""", """Bar"""], ["""Bar"""]], } ) return dataset def a__ ( self :Tuple ): snake_case_ : str = np.array( [ B"""This is the first record""", B"""This is the second record""", B"""This is the third record""", B"""This is the fourth record""", B"""This is the fifth record""", B"""This is a longer longer longer record""", ] ,dtype=_UpperCamelCase ,) return block_records def a__ ( self :Union[str, Any] ): snake_case_ : List[Any] = RealmRetriever( block_records=self.get_dummy_block_records() ,tokenizer=self.get_tokenizer() ,) return retriever def a__ ( self :Tuple ): snake_case_ : List[str] = self.get_config() snake_case_ : Union[str, Any] = self.get_dummy_retriever() snake_case_ : Union[str, Any] = retriever.tokenizer snake_case_ : Any = np.array([0, 3] ,dtype="""long""" ) snake_case_ : Tuple = tokenizer(["""Test question"""] ).input_ids snake_case_ : Tuple = tokenizer( ["""the fourth"""] ,add_special_tokens=_UpperCamelCase ,return_token_type_ids=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,).input_ids snake_case_ : Union[str, Any] = config.reader_seq_len snake_case_ , snake_case_ , snake_case_ , snake_case_ : Tuple = retriever( _UpperCamelCase ,_UpperCamelCase ,answer_ids=_UpperCamelCase ,max_length=_UpperCamelCase ,return_tensors="""np""" ) self.assertEqual(len(_UpperCamelCase ) ,2 ) self.assertEqual(len(_UpperCamelCase ) ,2 ) self.assertEqual(len(_UpperCamelCase ) ,2 ) self.assertEqual(concat_inputs.input_ids.shape ,(2, 1_0) ) self.assertEqual(concat_inputs.attention_mask.shape ,(2, 1_0) ) self.assertEqual(concat_inputs.token_type_ids.shape ,(2, 1_0) ) 
self.assertEqual(concat_inputs.special_tokens_mask.shape ,(2, 1_0) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) ,["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] ,) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) ,["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] ,) def a__ ( self :Tuple ): snake_case_ : Optional[Any] = self.get_config() snake_case_ : List[str] = self.get_dummy_retriever() snake_case_ : str = retriever.tokenizer snake_case_ : Tuple = np.array([0, 3, 5] ,dtype="""long""" ) snake_case_ : List[str] = tokenizer(["""Test question"""] ).input_ids snake_case_ : Optional[Any] = tokenizer( ["""the fourth""", """longer longer"""] ,add_special_tokens=_UpperCamelCase ,return_token_type_ids=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,).input_ids snake_case_ : List[Any] = config.reader_seq_len snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = retriever( _UpperCamelCase ,_UpperCamelCase ,answer_ids=_UpperCamelCase ,max_length=_UpperCamelCase ,return_tensors="""np""" ) self.assertEqual([False, True, True] ,_UpperCamelCase ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] ,_UpperCamelCase ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] ,_UpperCamelCase ) def a__ ( self :Any ): snake_case_ : Optional[Any] = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname ,"""realm_block_records""" ) ) # Test local path snake_case_ : Dict = retriever.from_pretrained(os.path.join(self.tmpdirname ,"""realm_block_records""" ) ) self.assertEqual(retriever.block_records[0] ,B"""This is the first record""" ) # Test mocked remote path with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download: snake_case_ : int = os.path.join( os.path.join(self.tmpdirname ,"""realm_block_records""" ) ,_REALM_BLOCK_RECORDS_FILENAME ) snake_case_ : str = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" ) self.assertEqual(retriever.block_records[0] ,B"""This is the first record""" )
8
'''simple docstring''' import re def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[Any] = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ) if __name__ == "__main__": __A : int = '0094702343221' print(is_sri_lankan_phone_number(phone))
8
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : int = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
8
'''simple docstring''' from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class __UpperCamelCase ( lowercase__ ): lowercase : Union[List[PIL.Image.Image], np.ndarray] lowercase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
8
1
'''simple docstring''' import copy import random from transformers import CLIPTokenizer class __UpperCamelCase ( lowercase__ ): def __init__( self :Any ,*_UpperCamelCase :str ,**_UpperCamelCase :str ): super().__init__(*_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Optional[int] = {} def a__ ( self :List[str] ,_UpperCamelCase :List[Any] ,*_UpperCamelCase :str ,**_UpperCamelCase :int ): snake_case_ : Dict = super().add_tokens(_UpperCamelCase ,*_UpperCamelCase ,**_UpperCamelCase ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def a__ ( self :Optional[Any] ,_UpperCamelCase :List[Any] ,*_UpperCamelCase :Any ,_UpperCamelCase :Union[str, Any]=1 ,**_UpperCamelCase :Dict ): snake_case_ : Any = [] if num_vec_per_token == 1: self.try_adding_tokens(_UpperCamelCase ,*_UpperCamelCase ,**_UpperCamelCase ) output.append(_UpperCamelCase ) else: snake_case_ : List[Any] = [] for i in range(_UpperCamelCase ): snake_case_ : List[Any] = placeholder_token + F'''_{i}''' self.try_adding_tokens(_UpperCamelCase ,*_UpperCamelCase ,**_UpperCamelCase ) output.append(_UpperCamelCase ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}. Keep placeholder tokens independent.''' ) snake_case_ : Optional[Any] = output def a__ ( self :Tuple ,_UpperCamelCase :str ,_UpperCamelCase :List[str]=False ,_UpperCamelCase :Optional[Any]=1.0 ): if isinstance(_UpperCamelCase ,_UpperCamelCase ): snake_case_ : Optional[int] = [] for i in range(len(_UpperCamelCase ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=_UpperCamelCase ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: snake_case_ : Tuple = self.token_map[placeholder_token] snake_case_ : Union[str, Any] = tokens[: 1 + int(len(_UpperCamelCase ) * prop_tokens_to_load )] if vector_shuffle: snake_case_ : Optional[Any] = copy.copy(_UpperCamelCase ) random.shuffle(_UpperCamelCase ) snake_case_ : List[str] = text.replace(_UpperCamelCase ,""" """.join(_UpperCamelCase ) ) return text def __call__( self :int ,_UpperCamelCase :Tuple ,*_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :int=False ,_UpperCamelCase :Dict=1.0 ,**_UpperCamelCase :Optional[int] ): return super().__call__( self.replace_placeholder_tokens_in_text( _UpperCamelCase ,vector_shuffle=_UpperCamelCase ,prop_tokens_to_load=_UpperCamelCase ) ,*_UpperCamelCase ,**_UpperCamelCase ,) def a__ ( self :str ,_UpperCamelCase :List[str] ,*_UpperCamelCase :str ,_UpperCamelCase :str=False ,_UpperCamelCase :str=1.0 ,**_UpperCamelCase :Optional[Any] ): return super().encode( self.replace_placeholder_tokens_in_text( _UpperCamelCase ,vector_shuffle=_UpperCamelCase ,prop_tokens_to_load=_UpperCamelCase ) ,*_UpperCamelCase ,**_UpperCamelCase ,)
8
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): lowercase : Dict = StableDiffusionInpaintPipeline lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase : Optional[int] = frozenset([] ) def a__ ( self :Any ): torch.manual_seed(0 ) snake_case_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,) snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) torch.manual_seed(0 ) snake_case_ : List[str] = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) snake_case_ : Optional[int] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,) snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase ) snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0] snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) ) snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) ) if str(_UpperCamelCase ).startswith("""mps""" ): snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase ) else: snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ : int = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": 
mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def a__ ( self :Any ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Optional[Any] = self.get_dummy_components() snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase ) snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ ( self :Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self :Tuple ): snake_case_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Dict = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : Union[str, Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a__ ( self :Tuple ): snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[Any] = torch.manual_seed(0 ) snake_case_ : Any = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase 
,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : List[str] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a__ ( self :Union[str, Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" ) snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : Tuple = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,) snake_case_ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
8
1
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int ): '''simple docstring''' return int((input_a, input_a).count(0 ) == 0 ) def UpperCAmelCase ( ): '''simple docstring''' assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
8
'''simple docstring''' import collections import os import re from pathlib import Path __A : Dict = 'src/transformers' # Matches is_xxx_available() __A : Dict = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __A : int = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __A : List[Any] = re.compile(r'^\s*try:') # Catches a line with else: __A : Any = re.compile(r'^\s*else:') def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase_ ) is None: return None snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )] backends.sort() return "_and_".join(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : List[Any] = 0 while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : Union[str, Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase_ ): snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0] snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ ) if single_line_import_search is not None: snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : Union[str, Any] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : List[Any] = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None: snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_between_brackets.search(lowerCamelCase_ ) is not None: snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_quote_object.search(lowerCamelCase_ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : List[Any] = [] while ( line_index < len(lowerCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : Union[str, Any] = lines[line_index] snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
snake_case_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : Dict = lines[line_index] snake_case_ : Any = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ): '''simple docstring''' def find_duplicates(lowerCamelCase_ :Union[str, Any] ): return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Optional[int] = [] for key in import_dict_objects.keys(): snake_case_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = [] for root, _, files in os.walk(lowerCamelCase_ ): if "__init__.py" in files: snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" ) snake_case_ : Dict = parse_init(lowerCamelCase_ ) if objects is not None: snake_case_ : Any = analyze_results(*lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCamelCase_ ) ) if len(lowerCamelCase_ ) > 0: raise ValueError("""\n\n""".join(lowerCamelCase_ ) ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = [] for path, directories, files in os.walk(lowerCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCamelCase_ ) for fname in files: if fname == "__init__.py": continue snake_case_ : Dict = 
str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCamelCase_ ) return submodules __A : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def UpperCAmelCase ( ): '''simple docstring''' # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f: snake_case_ : str = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) ) snake_case_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
8
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : int = { 'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = ['LlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ['LlamaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ 'LlamaForCausalLM', 'LlamaModel', 'LlamaPreTrainedModel', 'LlamaForSequenceClassification', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __A : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Any=1_8 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :List[str]=4_0_0 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=True ,): snake_case_ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case_ : Union[str, Any] = parent snake_case_ : str = batch_size snake_case_ : List[Any] = num_channels snake_case_ : Tuple = image_size snake_case_ : int = min_resolution snake_case_ : int = max_resolution snake_case_ : Union[str, Any] = do_resize snake_case_ : Optional[Any] = size snake_case_ : Any = apply_ocr def a__ ( self :Union[str, Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None def a__ ( self :List[Any] ): snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self ) @property def a__ ( self :int ): return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self :Any ): snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) ) def a__ ( self :int ): snake_case_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} ) snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ) self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} ) def a__ ( self :Optional[Any] ): pass def a__ ( self :Union[str, Any] ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,Image.Image ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) self.assertIsInstance(encoding.words ,_UpperCamelCase ) self.assertIsInstance(encoding.boxes ,_UpperCamelCase ) # Test batched snake_case_ : List[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], 
self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Tuple ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,np.ndarray ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Any = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Optional[Any] ): # Initialize image_processing snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Union[str, Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :List[Any] ): # with apply_OCR = True snake_case_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" ) snake_case_ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) snake_case_ : Dict = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", 
"""A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 
3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words ,_UpperCamelCase ) self.assertListEqual(encoding.boxes ,_UpperCamelCase ) # with apply_OCR = False snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase ) snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
8
1
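For reference, the test fragment above exercises the two operating modes of the LayoutLMv3 image processor. A minimal sketch of that pattern, assuming the obfuscated LayoutLMvaImageProcessor corresponds to transformers' LayoutLMv3ImageProcessor and that pytesseract is installed for the OCR path; "document.png" is a hypothetical input file:

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

image = Image.open("document.png").convert("RGB")  # hypothetical input image

# apply_ocr=True runs Tesseract and returns the recognized words plus their
# bounding boxes (normalized to a 0-1000 coordinate grid).
encoding = LayoutLMv3ImageProcessor(apply_ocr=True)(image, return_tensors="pt")
print(encoding.words, encoding.boxes)

# apply_ocr=False only resizes/normalizes pixels: shape (1, 3, 224, 224).
encoding = LayoutLMv3ImageProcessor(apply_ocr=False)(image, return_tensors="pt")
print(encoding.pixel_values.shape)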
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
8
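The configuration class above only stores hyperparameters. A short usage sketch, assuming it corresponds to transformers' CanineConfig (defaults taken from the constructor shown):

from transformers import CanineConfig, CanineModel

config = CanineConfig()  # hidden_size=768, 12 layers, 12 heads, max_position_embeddings=16384
print(config.downsampling_rate, config.num_hash_functions, config.num_hash_buckets)  # 4 8 16384
model = CanineModel(config)  # randomly initialized; no pretrained weights loaded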
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
8
1
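Both generators above implement the binomial recurrence C(n, k) = C(n-1, k-1) + C(n-1, k); the optimized variant additionally exploits the row symmetry C(n, k) = C(n, n-k) and only computes the first half of each row. A quick cross-check against math.comb, assuming the corrected functions above are in scope:

from math import comb

for rows_fn in (generate_pascal_triangle, generate_pascal_triangle_optimized):
    triangle = rows_fn(10)
    for n, row in enumerate(triangle):
        assert row == [comb(n, k) for k in range(n + 1)]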
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations (half-adder loop)."""
    while second != 0:
        carry = first & second  # bits where both operands are 1
        first ^= second         # sum without the carries
        second = carry << 1     # carries move one position to the left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
8
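One caveat worth noting: because Python integers have arbitrary precision, the carry loop above never terminates when the mathematical sum is negative (the carry keeps shifting left forever). A sketch of the standard workaround, emulating 32-bit two's complement with a mask; this variant is my addition, not part of the sample:

MASK = 0xFFFFFFFF  # keep intermediate results within 32 bits

def add_32bit(first: int, second: int) -> int:
    while second != 0:
        carry = (first & second) & MASK
        first = (first ^ second) & MASK
        second = (carry << 1) & MASK
    # Reinterpret the 32-bit pattern as a signed value.
    return first if first <= 0x7FFFFFFF else ~(first ^ MASK)

assert add_32bit(3, 5) == 8
assert add_32bit(-2, 7) == 5
assert add_32bit(2, -7) == -5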
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
8
1
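The assertions above follow the standard pattern for integration tests on model outputs: compare shapes exactly, but compare values with torch.allclose and a small absolute tolerance, since bit-exact equality across hardware and library versions is not guaranteed. A small illustration of the difference, not part of the test:

import torch

expected = torch.tensor([0.0101, 0.1218, -0.0803])
actual = expected + 5e-4  # e.g. numerical noise from a different backend
assert not torch.equal(actual, expected)          # exact comparison fails
assert torch.allclose(actual, expected, atol=1e-3)  # tolerance-based passes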
'''simple docstring''' import pytest import datasets # Import fixture modules as plugins __A : Union[str, Any] = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec'] def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ["""integration""", """unit"""] ): continue item.add_marker(pytest.mark.unit ) def UpperCAmelCase ( lowerCamelCase_ :Dict ): '''simple docstring''' config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" ) @pytest.fixture(autouse=lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any ): '''simple docstring''' # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? snake_case_ : Dict = tmp_path_factory.getbasetemp() / """cache""" snake_case_ : int = test_hf_cache_home / """datasets""" snake_case_ : Optional[int] = test_hf_cache_home / """metrics""" snake_case_ : Optional[Any] = test_hf_cache_home / """modules""" monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(lowerCamelCase_ ) ) monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(lowerCamelCase_ ) ) monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(lowerCamelCase_ ) ) snake_case_ : str = test_hf_datasets_cache / """downloads""" monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(lowerCamelCase_ ) ) snake_case_ : Union[str, Any] = test_hf_datasets_cache / """downloads""" / """extracted""" monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowerCamelCase_ ) ) @pytest.fixture(autouse=lowerCamelCase_ , scope="""session""" ) def UpperCAmelCase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): '''simple docstring''' # don't take tests into account when counting downloads monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , lowerCamelCase_ )
8
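Because the cache fixture above is autouse, every test in the suite transparently reads and writes under a per-session temporary directory instead of the user's real Hugging Face cache. A hypothetical test demonstrating the isolation (illustrative only, not part of the file):

import datasets.config

def test_cache_is_isolated():
    # The monkeypatched value points inside pytest's base temp dir,
    # never at ~/.cache/huggingface.
    assert "cache" in str(datasets.config.HF_DATASETS_CACHE)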
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x-axis over [x_start, x_end]."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
8
1
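The loop above is the composite trapezoidal rule: with n steps of width Δx = (b - a)/n it accumulates

$$\int_a^b f(x)\,dx \;\approx\; \sum_{i=0}^{n-1} \frac{f(x_i) + f(x_{i+1})}{2}\,\Delta x, \qquad x_i = a + i\,\Delta x,$$

except that the abs() call takes each trapezoid's area as an absolute value, so the function approximates the total unsigned area between the curve and the x-axis rather than the signed integral. That is why the demo's prompt speaks of "the area between the curve ... and the x axis".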
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A : Optional[int] = abspath(join(dirname(__file__), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' config.addinivalue_line( """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" ) config.addinivalue_line( """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" ) config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" ) config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" ) config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" ) config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" ) def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] ): '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main snake_case_ : List[Any] = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(lowerCamelCase_ , id=lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] ): '''simple docstring''' # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: snake_case_ : Tuple = 0 # Doctest custom flag to ignore output. __A : Optional[int] = doctest.register_optionflag('IGNORE_RESULT') __A : List[Any] = doctest.OutputChecker class __UpperCamelCase ( lowercase__ ): def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[Any] ): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) __A : str = CustomOutputChecker __A : int = HfDoctestModule __A : Dict = HfDocTestParser
8
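Once conftest registers the custom markers above via addinivalue_line, individual tests can opt in without pytest emitting unknown-marker warnings. A hypothetical example:

import pytest

@pytest.mark.is_staging_test
def test_only_meaningful_in_staging():
    ...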
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) __A : int = logging.getLogger() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) snake_case_ : int = parser.parse_args() return args.f def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[Any] = {} snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" ) if os.path.exists(lowerCamelCase_ ): with open(lowerCamelCase_ , """r""" ) as f: snake_case_ : str = json.load(lowerCamelCase_ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda""" return is_using_cuda and is_apex_available() __A : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __UpperCamelCase ( lowercase__ ): @classmethod def a__ ( cls :Dict ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU snake_case_ : Optional[int] = tempfile.mkdtemp() snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" ) write_basic_config(save_location=cls.configPath ) snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def a__ ( cls :int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Optional[int] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : Dict = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2 snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : str = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertLess(result["""train_loss"""] ,0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) ) @unittest.skip(reason="""Fix me @muellerzr""" ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[str] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : Optional[int] = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 ) self.assertGreaterEqual(result["""eval_exact"""] ,2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Union[str, Any] = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Union[str, Any] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[Any] = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : int = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 ) self.assertGreaterEqual(result["""eval_rouge2"""] ,2 ) self.assertGreaterEqual(result["""eval_rougeL"""] ,7 ) self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : Tuple = self.get_auto_remove_tmp_dir() snake_case_ : Optional[Any] = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Any = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) ) @slow def a__ ( self :Optional[Any] ): snake_case_ : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCamelCase ) snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Any ): snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
8
1
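All of the test methods above share one contract: each example script writes a flat all_results.json into its output directory, which get_results() loads and the assertions inspect. A sketch of that contract with made-up numbers:

import json
import os
import tempfile

tmp_dir = tempfile.mkdtemp()
with open(os.path.join(tmp_dir, "all_results.json"), "w") as f:
    json.dump({"eval_accuracy": 0.80, "train_loss": 0.42}, f)

result = get_results(tmp_dir)  # helper defined at the top of the test module above
assert result["eval_accuracy"] >= 0.75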
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
8
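Note that "\u2581" in the loop above is SentencePiece's word-boundary marker (▁), which fill_mask converts back into a plain space before re-inserting the predicted token. The call at the bottom returns a list of (filled_sentence, probability, token) triples, roughly of this shape; the scores and tokens here are illustrative, not real model outputs:

[('Le camembert est délicieux :)', 0.49, ' délicieux'),
 ('Le camembert est excellent :)', 0.10, ' excellent'),
 ('Le camembert est savoureux :)', 0.03, ' savoureux')]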
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A : Tuple = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : str = ['input_values', 'padding_mask'] def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,): super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = chunk_length_s snake_case_ : str = overlap @property def a__ ( self :Any ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def a__ ( self :List[str] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. 
Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs snake_case_ : Tuple = True snake_case_ : str = bool( isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ): snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa ) elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): snake_case_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_UpperCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) snake_case_ : Tuple = None snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio ) snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) ) snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: snake_case_ : Any = max(array.shape[0] for array in raw_audio ) snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) ) snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length snake_case_ : Union[str, Any] = """max_length""" else: snake_case_ : int = input_values # normal padding on batch if padded_inputs is None: snake_case_ : Optional[int] = self.pad( _UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,) if padding: snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" ) snake_case_ : Optional[int] = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: snake_case_ : Dict = example[..., None] input_values.append(example.T ) snake_case_ : List[Any] = input_values if return_tensors is not None: snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase ) return padded_inputs
8
1
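A small sketch of the chunk arithmetic behind the two properties defined above. The values here are illustrative assumptions; the class defaults shown leave chunk_length_s and overlap unset (None), which disables chunking entirely:

sampling_rate = 24_000
chunk_length_s = 1.0
overlap = 0.01

chunk_length = int(chunk_length_s * sampling_rate)          # samples per chunk -> 24000
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # hop between chunks -> 23760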