Dataset schema:

| column                  | type   | range            |
|-------------------------|--------|------------------|
| code                    | string | 81 - 54k chars   |
| code_codestyle          | int64  | 0 - 721          |
| style_context           | string | 91 - 41.9k chars |
| style_context_codestyle | int64  | 0 - 699          |
| label                   | int64  | 0 - 1            |
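The rows below are raw dump output: each `code` and `style_context` cell holds one Python source file flattened into a single string, followed by its integer style id and the row's binary label. A minimal loading sketch with the `datasets` library; the repo id `user/code-style-pairs` is a hypothetical placeholder, not the dataset's real name:

```python
# Minimal sketch for loading and inspecting one row of this dataset.
# Assumption: the dataset is hosted on the Hugging Face Hub under a
# hypothetical repo id -- substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical repo id

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # each cell is one flattened source file
```

--- Row 1 ---

code: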
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map deprecated "no_*" arguments onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
code_codestyle: 653

style_context:
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
style_context_codestyle: 653
label: 1

--- Row 2 ---

code:
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
code_codestyle: 653

style_context:
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
style_context_codestyle: 653
label: 1

--- Row 3 ---

code:
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = StableDiffusionInstructPixaPixPipeline __A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} __A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __A = IMAGE_TO_IMAGE_IMAGE_PARAMS __A = IMAGE_TO_IMAGE_IMAGE_PARAMS def _a ( self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowercase = PNDMScheduler(skip_prk_steps=_lowerCAmelCase ) torch.manual_seed(0 ) lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase = CLIPTextModel(_lowerCAmelCase ) lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Dict: '''simple docstring''' lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("""RGB""" ) if str(_lowerCAmelCase ).startswith("""mps""" ): lowercase = torch.manual_seed(_lowerCAmelCase ) else: lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) lowercase = 
sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _a ( self ) -> int: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = """french fries""" lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = [inputs["""prompt"""]] * 2 lowercase = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0 lowercase = torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase ) lowercase = image / 2 + 0.5 lowercase = image.permute(0 , 3 , 1 , 2 ) lowercase = image.repeat(2 , 1 , 1 , 1 ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) lowercase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) lowercase = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] lowercase = [round(_lowerCAmelCase , 4 ) for x in image_slice.flatten().tolist()] print(""",""".join([str(_lowerCAmelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) lowercase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _a ( self ) -> Tuple: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _a ( self ) -> List[str]: '''simple docstring''' lowercase = self.get_dummy_components() lowercase = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) lowercase = VaeImageProcessor(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) 
pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = pipe(**self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="""pt""" ) )[0] lowercase = components["""vae"""] lowercase = self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="""pt""" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowercase = vae.encode(inputs[image_param] ).latent_dist.mode() lowercase = pipe(**_lowerCAmelCase )[0] lowercase = np.abs(out - out_latents_inputs ).max() self.assertLess(_lowerCAmelCase , 1E-4 , """passing latents as image input generate different result from passing image""" ) @slow @require_torch_gpu class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self , _lowerCAmelCase=0 ) -> Tuple: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = load_image( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" ) lowercase = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowercase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _a ( self ) -> List[str]: '''simple docstring''' lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase ) lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowercase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _a ( self ) -> Dict: '''simple docstring''' lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase ) lowercase = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowercase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _a ( self ) -> int: '''simple docstring''' lowercase = 0 def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None: lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase = latents.detach().cpu().numpy() assert 
latents.shape == (1, 4, 64, 64) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowercase = False lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _a ( self ) -> Optional[int]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ) lowercase = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def _a ( self ) -> Any: '''simple docstring''' lowercase = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowercase = inputs["""image"""].resize((504, 504) ) lowercase = """timbrooks/instruct-pix2pix""" lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained( _lowerCAmelCase , safety_checker=_lowerCAmelCase , ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = pipe(**_lowerCAmelCase ) lowercase = output.images[0] lowercase = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) lowercase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
code_codestyle: 653

style_context:
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : int = {'''vocab_file''': '''spm_char.model'''} lowercase_ : int = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } lowercase_ : Optional[Any] = { '''microsoft/speecht5_asr''': 1024, '''microsoft/speecht5_tts''': 1024, '''microsoft/speecht5_vc''': 1024, } class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: '''simple docstring''' lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) lowercase = vocab_file lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @property def _a ( self ) -> List[Any]: '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self ) -> str: '''simple docstring''' lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Union[str, Any]: '''simple docstring''' lowercase = self.__dict__.copy() lowercase = None return state def __setstate__( self , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' lowercase = self.sp_model.IdToPiece(_lowerCAmelCase ) return token def _a ( self , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = [] lowercase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowercase = [] else: current_sub_tokens.append(_lowerCAmelCase ) out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = 
None , _lowerCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) lowercase = [1] if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + suffix_ones return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , """wb""" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
style_context_codestyle: 653
label: 1

--- Row 4 ---

code:
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    # Recursively move `height` disks from `from_pole` to `to_pole`,
    # using `with_pole` as the spare peg.
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    print("moving disk from", fp, "to", tp)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
code_codestyle: 653

style_context:
def solution() -> int:
    # Project Euler 40: concatenate the positive integers into the digit
    # string "123456789101112..." and multiply the digits at positions
    # 1, 10, 100, ..., 1_000_000.
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 653
label: 1

--- Row 5 ---

code:
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ : Tuple = logging.get_logger(__name__) lowercase_ : Dict = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowercase_ : Optional[Any] = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } lowercase_ : Union[str, Any] = {'''facebook/blenderbot_small-90M''': 512} def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): lowercase = set() lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase = char lowercase = set(lowercase_ ) return pairs class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="__start__" , _lowerCAmelCase="__end__" , _lowerCAmelCase="__unk__" , _lowerCAmelCase="__null__" , **_lowerCAmelCase , ) -> Optional[int]: '''simple docstring''' super().__init__(unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , **_lowerCAmelCase ) with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle: lowercase = json.load(_lowerCAmelCase ) lowercase = {v: k for k, v in self.encoder.items()} with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle: lowercase = merges_handle.read().split("""\n""" )[1:-1] lowercase = [tuple(merge.split() ) for merge in merges] lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) lowercase = {} @property def _a ( self ) -> int: '''simple docstring''' return len(self.encoder ) def _a ( self ) -> Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' if token in self.cache: return self.cache[token] lowercase = re.sub("""([.,!?()])""" , r""" \1""" , _lowerCAmelCase ) lowercase = re.sub("""(')""" , r""" \1 """ , _lowerCAmelCase ) lowercase = re.sub(r"""\s{2,}""" , """ """ , _lowerCAmelCase ) if "\n" in token: lowercase = token.replace("""\n""" , """ __newln__""" ) lowercase = token.split(""" """ ) lowercase = [] for token in tokens: if not len(_lowerCAmelCase ): continue lowercase = token.lower() lowercase = tuple(_lowerCAmelCase ) lowercase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) lowercase = get_pairs(_lowerCAmelCase ) if not pairs: words.append(_lowerCAmelCase ) continue while True: lowercase = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowercase , lowercase = bigram lowercase = [] lowercase = 0 while i < len(_lowerCAmelCase ): try: lowercase = word.index(_lowerCAmelCase , _lowerCAmelCase ) new_word.extend(word[i:j] ) lowercase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(_lowerCAmelCase ) - 1 and 
word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase = tuple(_lowerCAmelCase ) lowercase = new_word if len(_lowerCAmelCase ) == 1: break else: lowercase = get_pairs(_lowerCAmelCase ) lowercase = """@@ """.join(_lowerCAmelCase ) lowercase = word[:-4] lowercase = word words.append(_lowerCAmelCase ) return " ".join(_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' lowercase = [] lowercase = re.findall(r"""\S+\n?""" , _lowerCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(""" """ ) ) ) return split_tokens def _a ( self , _lowerCAmelCase ) -> int: '''simple docstring''' lowercase = token.lower() return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' return self.decoder.get(_lowerCAmelCase , self.unk_token ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' lowercase = """ """.join(_lowerCAmelCase ).replace("""@@ """ , """""" ).strip() return out_string def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" ) lowercase = 0 with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowercase = token_index writer.write(""" """.join(_lowerCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file
code_codestyle: 653

style_context:
import os


def solution() -> str:
    # Project Euler 13: first ten digits of the sum of the numbers in num.txt.
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 653
label: 1

--- Row 6 ---

code:
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 653

style_context:
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = StableDiffusionPanoramaPipeline __A = TEXT_TO_IMAGE_PARAMS __A = TEXT_TO_IMAGE_BATCH_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS def _a ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowercase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowercase = DDIMScheduler() torch.manual_seed(0 ) lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase = CLIPTextModel(_lowerCAmelCase ) lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, # Setting height and width to None to prevent OOMs on CPU. 
"""height""": None, """width""": None, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a ( self ) -> int: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _a ( self ) -> str: '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = """french fries""" lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Tuple: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Any: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Dict: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = PNDMScheduler( beta_start=0.0_0085 , 
beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ 0.3696_8392, 0.2702_5372, 0.3244_6766, 0.2837_9387, 0.3636_3274, 0.3073_3347, 0.2710_0027, 0.2705_4125, 0.2553_6096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def _a ( self ) -> str: '''simple docstring''' lowercase = StableDiffusionPanoramaPipeline.from_pretrained( """stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase ) lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _a ( self ) -> Any: '''simple docstring''' lowercase = 0 def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None: lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1868_1869, 0.3390_7816, 0.536_1276, 0.1443_2865, -0.0285_6611, -0.7394_1123, 0.2339_7987, 0.4732_2682, -0.3782_3164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1853_9645, 0.3398_7248, 0.537_8559, 0.1443_7142, -0.0245_5261, -0.733_8317, 0.2399_0755, 0.4735_6272, -0.378_6505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowercase = False lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = 
DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _a ( self ) -> int: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ) lowercase = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
style_context_codestyle: 653
label: 1

--- Row 7 ---

code:
'''simple docstring''' import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''): lowercase_ : List[Any] = True from torch.cuda.amp import autocast lowercase_ : Tuple = logging.getLogger(__name__) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) __A = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) __A = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) __A = field( default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def SCREAMING_SNAKE_CASE ( lowercase_ : ModelArguments , lowercase_ : TrainingArguments ): logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) lowercase = logging.WARNING if model_args.verbose_logging: lowercase = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): lowercase = logging.INFO logger.setLevel(lowercase_ ) @dataclass class __UpperCamelCase : __A = field( default=_UpperCAmelCase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __A = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) __A = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) __A = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) __A = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) __A = field( default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class __UpperCamelCase : __A = 42 __A = 42 __A = "longest" __A = None __A = None def __call__( self , _lowerCAmelCase ) -> Dict[str, torch.Tensor]: '''simple docstring''' lowercase = self.feature_extractor.pad( _lowerCAmelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) lowercase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] ) lowercase = batch["""input_values"""].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula lowercase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to( torch.long ) lowercase = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device ) # these two operations makes sure that all values # before the output lengths indices are attended to lowercase = 1 lowercase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices lowercase = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_lowerCAmelCase , min_masks=2 , ) return batch class __UpperCamelCase (_UpperCAmelCase ): def __init__( self , *_lowerCAmelCase , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=1.0 , **_lowerCAmelCase ) -> Dict: '''simple docstring''' super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) lowercase = 0 lowercase = max_gumbel_temp lowercase = min_gumbel_temp lowercase = gumbel_temp_decay def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> torch.Tensor: '''simple docstring''' model.train() lowercase = self._prepare_inputs(_lowerCAmelCase ) if self.use_amp: with autocast(): lowercase = self.compute_loss(_lowerCAmelCase , _lowerCAmelCase ) else: lowercase = self.compute_loss(_lowerCAmelCase , _lowerCAmelCase ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": lowercase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": lowercase = loss.sum() / (inputs["""mask_time_indices"""]).sum() else: raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']""" ) if self.args.gradient_accumulation_steps > 1: lowercase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(_lowerCAmelCase ).backward() elif self.use_apex: with amp.scale_loss(_lowerCAmelCase , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(_lowerCAmelCase ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() configure_logger(lowercase_ , lowercase_ ) # Downloading and loading a dataset from the hub. lowercase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" lowercase = DatasetDict() lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , ) lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" lowercase = DatasetDict() lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , ) lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported lowercase = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowercase_ ) def prepare_dataset(lowercase_ : List[Any] ): # check that all files have the correct sampling rate lowercase , lowercase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays lowercase = datasets.map( lowercase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names ) # filter audio files that are too long lowercase = vectorized_datasets.filter( lambda lowercase_ : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(lowercase_ : str ): return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` lowercase = vectorized_datasets.map( lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , ) # pretraining is only 
supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 lowercase = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" ) lowercase = WavaVecaForPreTraining(lowercase_ ) lowercase = DataCollatorForWavaVecaPretraining(model=lowercase_ , feature_extractor=lowercase_ ) lowercase = WavaVecaPreTrainer( model=lowercase_ , data_collator=lowercase_ , args=lowercase_ , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=lowercase_ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
code_codestyle: 653

style_context:
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) lowercase_ : Tuple = logging.getLogger(__name__) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) __A = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) __A = field( default=1024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field( default=128 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) __A = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) __A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} ) __A = field(default=-1 , metadata={'''help''': '''# test examples. 
-1 means use all.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , ) def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ): logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() check_output_dir(lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowercase_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(lowercase_ , lowercase_ , lowercase_ ): assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) ) lowercase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowercase_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: lowercase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase_ , lowercase_ ): lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) lowercase = SeqaSeqDataset # Get datasets lowercase = ( dataset_class( lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) lowercase = ( dataset_class( lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) lowercase = ( dataset_class( lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer lowercase = ( build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None ) lowercase = SeqaSeqTrainer( model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator( lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) lowercase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) lowercase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) lowercase = train_result.metrics lowercase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , lowercase_ , 
training_args.output_dir ) all_metrics.update(lowercase_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowercase = trainer.evaluate(metric_key_prefix="""val""" ) lowercase = data_args.n_val lowercase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" ) lowercase = test_output.metrics lowercase = data_args.n_test if trainer.is_world_process_zero(): lowercase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.predict_with_generate: lowercase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) lowercase = lmap(str.strip , lowercase_ ) write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
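# --- Hedged sketch (values invented) of what `handle_metrics` above writes per split ---
import json
import os

metrics = {"val_loss": 1.2345, "val_bleu": 27.9}  # hypothetical evaluation output
os.makedirs("out", exist_ok=True)
with open(os.path.join("out", "val_results.json"), "w") as f:
    json.dump(metrics, f, indent=4)  # mirrors save_json(metrics, "<output_dir>/val_results.json")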
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowercase_ : Union[str, Any] = logging.get_logger(__name__) lowercase_ : List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowercase_ : Dict = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } lowercase_ : Any = { '''allenai/led-base-16384''': 1_6384, } class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = LEDTokenizer __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="replace" , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Union[str, Any]: '''simple docstring''' super().__init__( _lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase , **_lowerCAmelCase , ) lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space: lowercase = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) ) lowercase = add_prefix_space lowercase = pre_tok_class(**_lowerCAmelCase ) lowercase = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase = """post_processor""" lowercase = getattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase ) if tokenizer_component_instance: lowercase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase = tuple(state["""sep"""] ) if "cls" in state: lowercase = tuple(state["""cls"""] ) lowercase = False if state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space: lowercase = add_prefix_space lowercase = True if state.get("""trim_offsets""" , _lowerCAmelCase ) != trim_offsets: lowercase = trim_offsets lowercase = True if changes_to_apply: lowercase = getattr(_lowerCAmelCase , state.pop("""type""" ) ) lowercase = component_class(**_lowerCAmelCase ) setattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _a ( self ) -> str: '''simple docstring''' if self._mask_token is None: if 
self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _a ( self , _lowerCAmelCase ) -> int: '''simple docstring''' lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else value lowercase = value def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> BatchEncoding: '''simple docstring''' lowercase = kwargs.get("""is_split_into_words""" , _lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase ) def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> BatchEncoding: '''simple docstring''' lowercase = kwargs.get("""is_split_into_words""" , _lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> str: '''simple docstring''' lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: '''simple docstring''' lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = PaddingStrategy.DO_NOT_PAD , _lowerCAmelCase = None , _lowerCAmelCase = None , ) -> dict: '''simple docstring''' lowercase = super()._pad( encoded_inputs=_lowerCAmelCase , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) # Load from model defaults if return_attention_mask is None: lowercase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowercase = len(encoded_inputs["""global_attention_mask"""] ) != len(_lowerCAmelCase ) if needs_to_be_padded: lowercase = len(_lowerCAmelCase ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": lowercase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
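# --- Hedged usage sketch for the fast LED tokenizer above ---
# The checkpoint id comes from the file's own pretrained map; the -1 padding of
# `global_attention_mask` is handled by the `_pad` override.
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
encoded = tokenizer(["A very long document to summarize ..."], return_tensors="pt")
print(encoded["input_ids"].shape)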
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowercase_ : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class __UpperCamelCase (_UpperCAmelCase ): def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: '''simple docstring''' super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str: '''simple docstring''' lowercase = {} lowercase = {} if prompt is not None: lowercase = prompt if generate_kwargs is not None: lowercase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any: '''simple docstring''' return super().__call__(_lowerCAmelCase , **_lowerCAmelCase ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]: '''simple docstring''' lowercase = load_image(_lowerCAmelCase ) if prompt is not None: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise ValueError( F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. 
""" """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase = self.model.config.model_type if model_type == "git": lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids lowercase = [self.tokenizer.cls_token_id] + input_ids lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(_lowerCAmelCase ) else: raise ValueError(F"""Model type {model_type} does not support conditional text generation""" ) else: lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase = None return model_inputs def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]: '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase = None if generate_kwargs is None: lowercase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase = model_inputs.pop(self.model.main_input_name ) lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase ) return model_outputs def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' lowercase = [] for output_ids in model_outputs: lowercase = { """generated_text""": self.tokenizer.decode( _lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , ) } records.append(_lowerCAmelCase ) return records
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowercase_ : Union[str, Any] = logging.get_logger(__name__) @dataclass class __UpperCamelCase (_UpperCAmelCase ): __A = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self , **_lowerCAmelCase ) -> Optional[int]: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase = deprecated_arg[3:] lowercase = not kwargs.pop(_lowerCAmelCase ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) lowercase = kwargs.pop("""tpu_name""" , self.tpu_name ) lowercase = kwargs.pop("""device_idx""" , self.device_idx ) lowercase = kwargs.pop("""eager_mode""" , self.eager_mode ) lowercase = kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**_lowerCAmelCase ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Name of TPU'''} , ) __A = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ["""tf"""] ) lowercase = None if self.tpu: try: if self.tpu_name: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: lowercase = None return tpu @cached_property def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) lowercase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" ) return strategy @property def _a ( self ) -> bool: '''simple docstring''' requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self ) -> "tf.distribute.Strategy": '''simple docstring''' requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self ) -> Tuple: '''simple docstring''' requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self ) -> int: '''simple docstring''' requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self ) -> bool: '''simple docstring''' return self.n_gpu > 0
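# --- Hedged sketch: constructing the benchmark arguments above (requires tensorflow) ---
# Assumes the public class name TensorFlowBenchmarkArguments; the derived
# `is_tpu` / `n_gpu` values resolve lazily through the cached properties.
from transformers import TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
print(args.is_tpu, args.n_gpu)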
"""Nezha model configuration."""
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
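# --- Minimal usage sketch for the configuration above ---
config = NezhaConfig(vocab_size=21128, max_relative_position=64)
print(config.model_type)  # "nezha"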
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ : Optional[int] = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Tuple = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
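# --- Hedged sketch of what the lazy structure above enables ---
# Importing a symbol from the package triggers the underlying module import only on
# first attribute access, keeping `import transformers` cheap.
from transformers.models.informer import InformerConfig  # resolved via _LazyModule

config = InformerConfig(prediction_length=24)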
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) lowercase_ : Tuple = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): lowercase = git.Repo(search_parent_directories=lowercase_ ) lowercase = { """repo_id""": str(lowercase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), } with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f: json.dump(lowercase_ , lowercase_ , indent=4 ) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): if params.n_gpu <= 0: lowercase = 0 lowercase = -1 lowercase = True lowercase = False return assert torch.cuda.is_available() logger.info("""Initializing GPUs""" ) if params.n_gpu > 1: assert params.local_rank != -1 lowercase = int(os.environ["""WORLD_SIZE"""] ) lowercase = int(os.environ["""N_GPU_NODE"""] ) lowercase = int(os.environ["""RANK"""] ) # number of nodes / node ID lowercase = params.world_size // params.n_gpu_per_node lowercase = params.global_rank // params.n_gpu_per_node lowercase = True assert params.n_nodes == int(os.environ["""N_NODES"""] ) assert params.node_id == int(os.environ["""NODE_RANK"""] ) # local job (single GPU) else: assert params.local_rank == -1 lowercase = 1 lowercase = 0 lowercase = 0 lowercase = 0 lowercase = 1 lowercase = 1 lowercase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowercase = params.node_id == 0 and params.local_rank == 0 lowercase = params.n_nodes > 1 # summary lowercase = F"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes ) logger.info(PREFIX + """Node ID : %i""" % params.node_id ) logger.info(PREFIX + """Local rank : %i""" % params.local_rank ) logger.info(PREFIX + """World size : %i""" % params.world_size ) logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node ) logger.info(PREFIX + """Master : %s""" % str(params.is_master ) ) logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) ) logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) ) logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("""Initializing PyTorch distributed""" ) torch.distributed.init_process_group( init_method="""env://""" , backend="""nccl""" , ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
"""Neville's iterated polynomial interpolation."""


def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 with Neville's algorithm.

    Returns the approximated value and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
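# Worked example: the five points below lie on y = x**2, so the interpolant is exact.
value, table = neville_interpolate([1, 2, 3, 4, 6], [1, 4, 9, 16, 36], 5)
print(value)  # 25.0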
"""Fetch the authenticated GitHub user's profile via the REST API."""
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of a user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase_ : str = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Tuple = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : List[Any] = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : List[Any] = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : List[Any] = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Any = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, 
TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys lowercase_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ : Union[str, Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ): lowercase = int(round(sample_rate * max_length ) ) if len(lowercase_ ) <= sample_length: return wav lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class __UpperCamelCase : __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} ) __A = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) __A = field( default='''validation''' , metadata={ '''help''': ( '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) __A = field( default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , ) __A = field( default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. 
Defaults to \'label\''''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) __A = field( default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , ) @dataclass class __UpperCamelCase : __A = field( default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} ) __A = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def _a ( self ) -> List[Any]: '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase = training_args.get_process_log_level() logger.setLevel(lowercase_ ) transformers.utils.logging.set_verbosity(lowercase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. lowercase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. lowercase = DatasetDict() lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ """Make sure to set `--audio_column_name` to the correct audio column - one of """ F"""{', '.join(raw_datasets['train'].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. 
""" """Make sure to set `--label_column_name` to the correct text column - one of """ F"""{', '.join(raw_datasets['train'].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy lowercase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. lowercase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) lowercase = feature_extractor.model_input_names[0] def train_transforms(lowercase_ : int ): lowercase = [] for audio in batch[data_args.audio_column_name]: lowercase = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(lowercase_ ) lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate ) lowercase = {model_input_name: inputs.get(lowercase_ )} lowercase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(lowercase_ : Dict ): lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]] lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate ) lowercase = {model_input_name: inputs.get(lowercase_ )} lowercase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names lowercase , lowercase = {}, {} for i, label in enumerate(lowercase_ ): lowercase = str(lowercase_ ) lowercase = label # Load the accuracy metric from the datasets package lowercase = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(lowercase_ : Tuple ): lowercase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids ) lowercase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: lowercase = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ ) if training_args.do_eval: if data_args.max_eval_samples is not None: lowercase = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ ) # Initialize our trainer lowercase = Trainer( model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) # Training if training_args.do_train: lowercase = None if training_args.resume_from_checkpoint is not None: lowercase = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase = last_checkpoint lowercase = trainer.train(resume_from_checkpoint=lowercase_ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase_ ) trainer.save_metrics("""eval""" , lowercase_ ) # Write model card and (optionally) push to hub lowercase = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase_ ) else: trainer.create_model_card(**lowercase_ ) if __name__ == "__main__": main()
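# --- Hedged sketch of the `random_subsample` helper defined at the top of the script ---
# (the name is taken from its call site in `train_transforms`; it is obfuscated above).
import numpy as np

wav = np.zeros(16_000 * 30)  # 30 s of silence at 16 kHz
clip = random_subsample(wav, max_length=20.0, sample_rate=16_000)
print(clip.shape)  # (320000,) -> a random 20 s window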
'''simple docstring''' import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class __UpperCamelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase=True , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ) -> List[str]: '''simple docstring''' lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_input_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_multiple_size lowercase = hidden_act lowercase = hidden_dropout lowercase = attention_dropout lowercase = weight_tying lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = num_choices lowercase = scope def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_input_mask: lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase = self.get_config() return config, input_ids, input_mask, token_labels def _a ( self ) -> List[Any]: '''simple docstring''' return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def _a ( self ) -> Tuple: '''simple docstring''' lowercase , lowercase , lowercase , lowercase = self.prepare_config_and_inputs() lowercase = True return config, input_ids, input_mask, token_labels def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' lowercase = GPTNeoXJapaneseModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) lowercase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: '''simple 
docstring''' lowercase = True lowercase = GPTNeoXJapaneseModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: '''simple docstring''' lowercase = GPTNeoXJapaneseForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: '''simple docstring''' lowercase = True lowercase = GPTNeoXJapaneseForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # first forward pass lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase ) lowercase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowercase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase = torch.cat([input_mask, next_mask] , dim=-1 ) lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase ) lowercase = output_from_no_past["""hidden_states"""][0] lowercase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )["""hidden_states"""][0] # select random slice lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase = config_and_inputs lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () __A = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () __A = ( {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) __A = False __A = False __A = False __A = False def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = GPTNeoXJapaneseModelTester(self ) lowercase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def _a ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def _a ( self ) -> int: '''simple docstring''' lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self ) -> List[str]: '''simple docstring''' lowercase , lowercase , lowercase , lowercase = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self ) -> Dict: '''simple docstring''' lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase = None self.model_tester.create_and_check_model_as_decoder(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self ) -> Dict: '''simple docstring''' lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self ) -> List[str]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_lowerCAmelCase ) @slow def _a ( self ) -> Dict: '''simple docstring''' lowercase = """abeja/gpt-neox-japanese-2.7b""" lowercase = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""] lowercase = [ """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""", """100年後に必要とされる会社は、「人」が中心の会社です。""", """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""", """国境の長いトンネルを抜けると、そこは雪国だった。""", """美味しい日本食といえば、やっぱりお寿司ですよね。""", ] lowercase = GPTNeoXJapaneseTokenizer.from_pretrained(_lowerCAmelCase ) lowercase = GPTNeoXJapaneseForCausalLM.from_pretrained(_lowerCAmelCase ) lowercase = [] for prompt in prompts: lowercase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" ).input_ids lowercase = model.generate(_lowerCAmelCase , max_length=50 ) lowercase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) predicted_outputs += generated_string self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def SCREAMING_SNAKE_CASE ( ): lowercase = HfArgumentParser(lowercase_ ) lowercase = parser.parse_args_into_dataclasses()[0] lowercase = TensorFlowBenchmark(args=lowercase_ ) try: lowercase = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] ) lowercase = """""" lowercase = eval(str(lowercase_ ).split(""" """ )[-1] ) lowercase = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(lowercase_ ) if len(lowercase_ ) > 0: lowercase = full_error_msg + begin_error_msg + str(lowercase_ ) raise ValueError(lowercase_ ) benchmark.run() if __name__ == "__main__": main()
653
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ : Any = logging.get_logger(__name__)

lowercase_ : str = {
    '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class __UpperCamelCase (_UpperCAmelCase ):
    __A = '''vit_msn'''

    def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-06 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**_lowerCAmelCase )

        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = initializer_range
        lowercase = layer_norm_eps
        lowercase = image_size
        lowercase = patch_size
        lowercase = num_channels
        lowercase = qkv_bias
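# A short sketch of instantiating the config above, assuming it is exposed as
# ViTMSNConfig in transformers (the class name here is obfuscated):
from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig()      # defaults above: hidden_size=768, 12 layers, 12 heads, 224px images, 16px patches
model = ViTMSNModel(config)  # randomly initialised encoder built from the config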
653
1
'''simple docstring'''
lowercase_ : List[str] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''


def SCREAMING_SNAKE_CASE ( lowercase_ : bytes ):
    # Make sure the supplied data is a bytes-like object
    if not isinstance(lowercase_ , lowercase_ ):
        lowercase = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
        raise TypeError(lowercase_ )

    lowercase = """""".join(bin(lowercase_ )[2:].zfill(8 ) for byte in data )

    lowercase = len(lowercase_ ) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        lowercase = B"""=""" * ((6 - len(lowercase_ ) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(lowercase_ ) % 6)
    else:
        lowercase = B""""""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(lowercase_ ) , 6 )
        ).encode()
        + padding
    )


def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
        lowercase = (
            """argument should be a bytes-like object or ASCII string, """
            F"""not '{encoded_data.__class__.__name__}'"""
        )
        raise TypeError(lowercase_ )

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(lowercase_ , lowercase_ ):
        try:
            lowercase = encoded_data.decode("""utf-8""" )
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""" )

    lowercase = encoded_data.count("""=""" )

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(lowercase_ ) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        lowercase = encoded_data[:-padding]

        lowercase = """""".join(
            bin(B64_CHARSET.index(lowercase_ ) )[2:].zfill(6 ) for char in encoded_data
        )[: -padding * 2]
    else:
        lowercase = """""".join(
            bin(B64_CHARSET.index(lowercase_ ) )[2:].zfill(6 ) for char in encoded_data
        )

    lowercase = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(lowercase_ ) , 8 )
    ]

    return bytes(lowercase_ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
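# Sanity check for the algorithm above: a correct implementation must round-trip
# and agree with the standard library. The stdlib names are used below because
# both functions above were obfuscated to the same name (SCREAMING_SNAKE_CASE),
# so the second definition shadows the first.
import base64

data = b"Hello, World!"
encoded = base64.b64encode(data)           # b'SGVsbG8sIFdvcmxkIQ==' (18 chars + '==' padding)
assert base64.b64decode(encoded) == data   # 13 bytes -> 104 bits -> 18 six-bit groups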
653
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : str ):
    lowercase = """"""
    for i in table:
        res += inp[i - 1]
    return res


def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
    return data[1:] + data[0]


def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
    lowercase = """"""
    for i in range(len(lowercase_ ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
    lowercase = int("""0b""" + data[0] + data[-1] , 2 )
    lowercase = int("""0b""" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]


def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any ):
    lowercase = message[:4]
    lowercase = message[4:]
    lowercase = apply_table(lowercase_ , lowercase_ )
    lowercase = xor(lowercase_ , lowercase_ )
    lowercase = apply_sbox(lowercase_ , temp[:4] )  # noqa: E741
    lowercase = apply_sbox(lowercase_ , temp[4:] )
    lowercase = """0""" * (2 - len(lowercase_ )) + l  # noqa: E741
    lowercase = """0""" * (2 - len(lowercase_ )) + r
    lowercase = apply_table(l + r , lowercase_ )
    lowercase = xor(lowercase_ , lowercase_ )
    return temp + right


if __name__ == "__main__":
    lowercase_ : Tuple = input('''Enter 10 bit key: ''')
    lowercase_ : Any = input('''Enter 8 bit message: ''')

    lowercase_ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
    lowercase_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    lowercase_ : List[Any] = [2, 4, 3, 1]
    lowercase_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
    lowercase_ : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
    lowercase_ : Optional[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
    lowercase_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    lowercase_ : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    lowercase_ : Union[str, Any] = apply_table(key, paa_table)
    lowercase_ : Optional[Any] = temp[:5]
    lowercase_ : int = temp[5:]
    lowercase_ : List[str] = left_shift(left)
    lowercase_ : int = left_shift(right)
    lowercase_ : Tuple = apply_table(left + right, pa_table)
    lowercase_ : List[str] = left_shift(left)
    lowercase_ : Optional[Any] = left_shift(right)
    lowercase_ : Union[str, Any] = left_shift(left)
    lowercase_ : Union[str, Any] = left_shift(right)
    lowercase_ : Optional[int] = apply_table(left + right, pa_table)

    # encryption
    lowercase_ : int = apply_table(message, IP)
    lowercase_ : Dict = function(expansion, sa, sa, keya, temp)
    lowercase_ : Any = temp[4:] + temp[:4]
    lowercase_ : List[Any] = function(expansion, sa, sa, keya, temp)
    lowercase_ : Tuple = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)

    # decryption
    lowercase_ : List[str] = apply_table(CT, IP)
    lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
    lowercase_ : Optional[Any] = temp[4:] + temp[:4]
    lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
    lowercase_ : Optional[Any] = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
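# A self-contained sketch of the 1-indexed table permutation that drives the
# S-DES key schedule above. The helper name `permute` is hypothetical (the defs
# above are obfuscated to SCREAMING_SNAKE_CASE); the expected value is the
# standard P10 textbook example.
def permute(bits: str, table: list[int]) -> str:
    # tables index bit positions starting at 1, hence the i - 1
    return "".join(bits[i - 1] for i in table)

p10 = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
assert permute("1010000010", p10) == "1000001100"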
653
1
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


lowercase_ : Any = importlib.util.find_spec('''s3fs''') is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

lowercase_ : List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
    if "://" in dataset_path:
        lowercase = dataset_path.split("""://""" )[1]
    return dataset_path


def SCREAMING_SNAKE_CASE ( lowercase_ : fsspec.AbstractFileSystem ):
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def SCREAMING_SNAKE_CASE ( lowercase_ : fsspec.AbstractFileSystem , lowercase_ : str , lowercase_ : str ):
    lowercase = not is_remote_filesystem(lowercase_ )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(lowercase_ ) , fs._strip_protocol(lowercase_ ) )
    else:
        fs.mv(lowercase_ , lowercase_ , recursive=lowercase_ )


def SCREAMING_SNAKE_CASE ( ):
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        lowercase = None
        lowercase = None
        lowercase = threading.Lock()
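# Behaviour sketch of the first helper above (extract a path from a dataset URI),
# restated with a local stand-in name since the original def is obfuscated:
def _strip_scheme(dataset_path: str) -> str:
    # "s3://bucket/train" -> "bucket/train"; plain local paths pass through
    return dataset_path.split("://")[1] if "://" in dataset_path else dataset_path

assert _strip_scheme("s3://my-bucket/datasets/train") == "my-bucket/datasets/train"
assert _strip_scheme("/local/datasets/train") == "/local/datasets/train"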
653
'''simple docstring'''
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


lowercase_ : int = 50_0000
lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__)
lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))


@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ):
    lowercase = dataset.map(**lowercase_ )


@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ):
    lowercase = dataset.filter(**lowercase_ )


def SCREAMING_SNAKE_CASE ( ):
    lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        lowercase = generate_example_dataset(
            os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )

        lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )

        def tokenize(lowercase_ : Dict ):
            return tokenizer(examples["""text"""] )

        lowercase = map(lowercase_ )
        lowercase = map(lowercase_ , batched=lowercase_ )
        lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        with dataset.formatted_as(type="""numpy""" ):
            lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        with dataset.formatted_as(type="""pandas""" ):
            lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
        lowercase = filter(lowercase_ )

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(lowercase_ , """wb""" ) as f:
        f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
653
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowercase_ : int = None lowercase_ : List[Any] = logging.get_logger(__name__) lowercase_ : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowercase_ : Dict = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } lowercase_ : Union[str, Any] = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off lowercase_ : Dict = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = PRETRAINED_VOCAB_FILES_MAP __A = ['''input_ids''', '''attention_mask'''] __A = MBartTokenizer __A = [] __A = [] def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Optional[int]: '''simple docstring''' lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token super().__init__( vocab_file=_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , src_lang=_lowerCAmelCase , tgt_lang=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , ) lowercase = vocab_file lowercase = False if not self.vocab_file else True lowercase = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) lowercase = { lang_code: self.convert_tokens_to_ids(_lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowercase = src_lang if src_lang is not None else """en_XX""" lowercase = self.convert_tokens_to_ids(self._src_lang ) lowercase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _a ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def _a ( self , _lowerCAmelCase ) -> None: '''simple docstring''' lowercase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: '''simple docstring''' lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) lowercase = src_lang lowercase = self(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) lowercase = self.convert_tokens_to_ids(_lowerCAmelCase ) lowercase = tgt_lang_id return inputs def _a ( self , _lowerCAmelCase , _lowerCAmelCase = "en_XX" , _lowerCAmelCase = None , _lowerCAmelCase = "ro_RO" , **_lowerCAmelCase , ) -> BatchEncoding: '''simple docstring''' lowercase = src_lang lowercase = tgt_lang return super().prepare_seqaseq_batch(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ) def _a ( self ) -> List[Any]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def _a ( self ) -> Any: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _a ( self , _lowerCAmelCase ) -> None: '''simple docstring''' lowercase = self.convert_tokens_to_ids(_lowerCAmelCase ) lowercase = [] lowercase = [self.eos_token_id, self.cur_lang_code] lowercase = self.convert_ids_to_tokens(self.prefix_tokens ) lowercase = self.convert_ids_to_tokens(self.suffix_tokens ) lowercase = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _a ( self , _lowerCAmelCase ) -> None: '''simple docstring''' lowercase = self.convert_tokens_to_ids(_lowerCAmelCase ) lowercase = [] lowercase = [self.eos_token_id, self.cur_lang_code] lowercase = self.convert_ids_to_tokens(self.prefix_tokens ) lowercase = self.convert_ids_to_tokens(self.suffix_tokens ) lowercase = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ): copyfile(self.vocab_file , _lowerCAmelCase ) return (out_vocab_file,)
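# A usage sketch for the fast tokenizer above, assuming the public
# facebook/mbart-large-en-ro checkpoint (downloaded on first use):
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Per set_src_lang_special_tokens above: no prefix tokens are added, and the
# sequence is suffixed with </s> followed by the source language code (en_XX).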
653
'''simple docstring''' from random import shuffle import tensorflow as tf from numpy import array def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ): lowercase = int(lowercase_ ) assert noofclusters < len(lowercase_ ) # Find out the dimensionality lowercase = len(vectors[0] ) # Will help select random centroids from among the available vectors lowercase = list(range(len(lowercase_ ) ) ) shuffle(lowercase_ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. lowercase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION lowercase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points lowercase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ ) ] ##These nodes will assign the centroid Variables the appropriate ##values lowercase = tf.placeholder("""float64""" , [dim] ) lowercase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )] ##These nodes will assign an assignment Variable the appropriate ##value lowercase = tf.placeholder("""int32""" ) lowercase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input lowercase = tf.placeholder("""float""" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors lowercase = tf.reduce_mean(lowercase_ , 0 ) ##Node for computing Euclidean distances # Placeholders for input lowercase = tf.placeholder("""float""" , [dim] ) lowercase = tf.placeholder("""float""" , [dim] ) lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input lowercase = tf.placeholder("""float""" , [noofclusters] ) lowercase = tf.argmin(lowercase_ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. lowercase = tf.initialize_all_variables() # Initialize all variables sess.run(lowercase_ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. lowercase = 100 for _ in range(lowercase_ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowercase_ ) ): lowercase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
lowercase = [ sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input lowercase = sess.run( lowercase_ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowercase_ ): # Collect all the vectors assigned to this cluster lowercase = [ vectors[i] for i in range(len(lowercase_ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location lowercase = sess.run( lowercase_ , feed_dict={mean_input: array(lowercase_ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments lowercase = sess.run(lowercase_ ) lowercase = sess.run(lowercase_ ) return centroids, assignments
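# Hypothetical driver for the clustering function above (`tf_kmeans` stands in
# for its obfuscated name). Note the TF1-style API used above (tf.placeholder,
# tf.Session, tf.initialize_all_variables), so this needs TensorFlow 1.x or the
# tf.compat.v1 shim with eager execution disabled.
vectors = [[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 8.5]]
centroids, assignments = tf_kmeans(vectors, 2)  # k = 2
# Expect the first two points in one cluster and the last two in the other.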
653
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : int = {'''vocab_file''': '''spm_char.model'''} lowercase_ : int = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } lowercase_ : Optional[Any] = { '''microsoft/speecht5_asr''': 1024, '''microsoft/speecht5_tts''': 1024, '''microsoft/speecht5_vc''': 1024, } class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: '''simple docstring''' lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) lowercase = vocab_file lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @property def _a ( self ) -> List[Any]: '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self ) -> str: '''simple docstring''' lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Union[str, Any]: '''simple docstring''' lowercase = self.__dict__.copy() lowercase = None return state def __setstate__( self , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' lowercase = self.sp_model.IdToPiece(_lowerCAmelCase ) return token def _a ( self , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = [] lowercase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowercase = [] else: current_sub_tokens.append(_lowerCAmelCase ) out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = 
None , _lowerCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) lowercase = [1] if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + suffix_ones return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , """wb""" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
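# A usage sketch, assuming the microsoft/speecht5_tts checkpoint referenced in
# the pretrained-vocab map above:
from transformers import SpeechT5Tokenizer

tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tok("Hello world!").input_ids
# Character-level SentencePiece pieces; build_inputs_with_special_tokens above
# appends a single </s> at the end and adds no BOS token.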
653
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
    def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        lowercase = update_area_of_max_square(lowercase_ , col + 1 )
        lowercase = update_area_of_max_square(row + 1 , col + 1 )
        lowercase = update_area_of_max_square(row + 1 , lowercase_ )

        if mat[row][col]:
            lowercase = 1 + min([right, diagonal, down] )
            lowercase = max(largest_square_area[0] , lowercase_ )
            return sub_problem_sol
        else:
            return 0

    lowercase = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]


def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
    def update_area_of_max_square_using_dp_array(lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
        lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
        lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )

        if mat[row][col]:
            lowercase = 1 + min([right, diagonal, down] )
            lowercase = max(largest_square_area[0] , lowercase_ )
            lowercase = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    lowercase = [0]
    lowercase = [[-1] * cols for _ in range(lowercase_ )]
    update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )
    return largest_square_area[0]


def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
    lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
    lowercase = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            lowercase = dp_array[row][col + 1]
            lowercase = dp_array[row + 1][col + 1]
            lowercase = dp_array[row + 1][col]

            if mat[row][col] == 1:
                lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
                lowercase = max(dp_array[row][col] , lowercase_ )
            else:
                lowercase = 0

    return largest_square_area


def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
    lowercase = [0] * (cols + 1)
    lowercase = [0] * (cols + 1)
    lowercase = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            lowercase = current_row[col + 1]
            lowercase = next_row[col + 1]
            lowercase = next_row[col]

            if mat[row][col] == 1:
                lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
                lowercase = max(current_row[col] , lowercase_ )
            else:
                lowercase = 0

        lowercase = current_row

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
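# Worked example for the four implementations above. Note they all return the
# side length of the largest all-ones square (despite "area" in the helper
# names): for [[1, 1], [1, 1]] the printed result is 2, not 4.
mat = [
    [1, 0, 1, 1],
    [1, 1, 1, 1],
    [0, 1, 1, 1],
]
# Called as f(3, 4, mat), each variant returns 2: a 2x2 all-ones square exists
# (e.g. rows 1-2 x cols 2-3), and no 3x3 all-ones square fits in this grid.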
653
1
'''simple docstring'''
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def SCREAMING_SNAKE_CASE ( lowercase_ : Any ):
    lowercase = {}

    lowercase = job["""started_at"""]
    lowercase = job["""completed_at"""]

    lowercase = date_parser.parse(lowercase_ )
    lowercase = date_parser.parse(lowercase_ )

    lowercase = round((end_datetime - start_datetime).total_seconds() / 60.0 )

    lowercase = start
    lowercase = end
    lowercase = duration_in_min

    return job_info


def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : List[Any]=None ):
    lowercase = None

    if token is not None:
        lowercase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}

    lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    lowercase = requests.get(lowercase_ , headers=lowercase_ ).json()
    lowercase = {}

    try:
        job_time.update({job["""name"""]: extract_time_from_single_job(lowercase_ ) for job in result["""jobs"""]} )
        lowercase = math.ceil((result["""total_count"""] - 100) / 100 )

        for i in range(lowercase_ ):
            lowercase = requests.get(url + F"""&page={i + 2}""" , headers=lowercase_ ).json()
            job_time.update({job["""name"""]: extract_time_from_single_job(lowercase_ ) for job in result["""jobs"""]} )

        return job_time
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )

    return {}


if __name__ == "__main__":
    lowercase_ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    lowercase_ : Union[str, Any] = parser.parse_args()

    lowercase_ : str = get_job_time(args.workflow_run_id)
    lowercase_ : List[str] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
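# Example invocation of the script above (the filename is hypothetical and the
# run id is a placeholder; a GITHUB token can be wired in where `token` is used):
#
#   python get_github_job_time.py --workflow_run_id 1234567890
#
# Output: one "<job name>: <minutes>" line per CI job, longest jobs first.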
653
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ : Optional[Any] = logging.get_logger(__name__)

lowercase_ : int = {
    '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}


class __UpperCamelCase (_UpperCAmelCase ):
    __A = '''gpt_bigcode'''
    __A = ['''past_key_values''']
    __A = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
        '''simple docstring'''
        lowercase = vocab_size
        lowercase = n_positions
        lowercase = n_embd
        lowercase = n_layer
        lowercase = n_head
        lowercase = n_inner
        lowercase = activation_function
        lowercase = resid_pdrop
        lowercase = embd_pdrop
        lowercase = attn_pdrop
        lowercase = layer_norm_epsilon
        lowercase = initializer_range
        lowercase = scale_attn_weights
        lowercase = use_cache
        lowercase = attention_softmax_in_fpaa
        lowercase = scale_attention_softmax_in_fpaa
        lowercase = multi_query

        lowercase = bos_token_id
        lowercase = eos_token_id

        super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
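# A minimal sketch of building a tiny randomly initialised model from the config
# above, assuming the public GPTBigCode classes in transformers:
from transformers import GPTBigCodeConfig, GPTBigCodeForCausalLM

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=64)  # tiny, for smoke tests
model = GPTBigCodeForCausalLM(config)
# multi_query=True (the default above) shares a single key/value head across all
# attention heads, as in SantaCoder.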
653
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ): __A = KandinskyInpaintPipeline __A = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] __A = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] __A = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] __A = False @property def _a ( self ) -> Tuple: '''simple docstring''' return 32 @property def _a ( self ) -> Union[str, Any]: '''simple docstring''' return 32 @property def _a ( self ) -> Union[str, Any]: '''simple docstring''' return self.time_input_dim @property def _a ( self ) -> Any: '''simple docstring''' return self.time_input_dim * 4 @property def _a ( self ) -> Dict: '''simple docstring''' return 100 @property def _a ( self ) -> str: '''simple docstring''' lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def _a ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) lowercase = MultilingualCLIP(_lowerCAmelCase ) lowercase = text_encoder.eval() return text_encoder @property def _a ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowercase = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } lowercase = UNetaDConditionModel(**_lowerCAmelCase ) return model @property def _a ( self ) -> Tuple: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", 
"UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _a ( self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self ) -> Any: '''simple docstring''' lowercase = self.dummy_text_encoder lowercase = self.dummy_tokenizer lowercase = self.dummy_unet lowercase = self.dummy_movq lowercase = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_lowerCAmelCase , ) lowercase = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Dict: '''simple docstring''' lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCAmelCase ) # create init_image lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) ) # create mask lowercase = np.ones((64, 64) , dtype=np.floataa ) lowercase = 0 if str(_lowerCAmelCase ).startswith("""mps""" ): lowercase = torch.manual_seed(_lowerCAmelCase ) else: lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """horse""", """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = """cpu""" lowercase = self.get_dummy_components() lowercase = self.pipeline_class(**_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) ) lowercase = output.images lowercase = pipe( **self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0] lowercase = image[0, -3:, -3:, -1] lowercase = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) lowercase = np.array( [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def _a ( self ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self ) -> List[str]: '''simple docstring''' lowercase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" 
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" ) lowercase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) lowercase = np.ones((768, 768) , dtype=np.floataa ) lowercase = 0 lowercase = """a hat""" lowercase = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCAmelCase ) lowercase = KandinskyInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa ) lowercase = pipeline.to(_lowerCAmelCase ) pipeline.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase , lowercase = pipe_prior( _lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() lowercase = pipeline( _lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , ) lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
653
'''simple docstring'''
import requests


def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
    lowercase = {"""Content-Type""": """application/json"""}
    lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )

    if response.status_code != 200:
        lowercase = (
            """Request to slack returned an error """
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(lowercase_ )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
653
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> int: '''simple docstring''' lowercase = tempfile.mkdtemp() # fmt: off lowercase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) lowercase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] lowercase = {"""unk_token""": """<unk>"""} lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_lowerCAmelCase ) ) lowercase = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase = os.path.join(self.tmpdirname , _lowerCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def _a ( self , **_lowerCAmelCase ) -> Tuple: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def _a ( self , **_lowerCAmelCase ) -> Tuple: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def _a ( self , **_lowerCAmelCase ) -> Dict: '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def _a ( self ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self ) -> Dict: '''simple docstring''' lowercase = self.get_tokenizer() lowercase = self.get_rust_tokenizer() lowercase = self.get_image_processor() lowercase = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCAmelCase ) lowercase = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , 
tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCAmelCase ) def _a ( self ) -> int: '''simple docstring''' lowercase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 ) lowercase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCAmelCase ) def _a ( self ) -> int: '''simple docstring''' lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase = self.prepare_image_inputs() lowercase = image_processor(_lowerCAmelCase , return_tensors="""np""" ) lowercase = processor(images=_lowerCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self ) -> int: '''simple docstring''' lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase = """lower newer""" lowercase = processor(text=_lowerCAmelCase ) lowercase = tokenizer(_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self ) -> Dict: '''simple docstring''' lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase = """lower newer""" lowercase = self.prepare_image_inputs() lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def _a ( self ) -> List[str]: '''simple docstring''' lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase = processor.batch_decode(_lowerCAmelCase ) lowercase = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase = """lower newer""" lowercase = 
self.prepare_image_inputs() lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
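# The pattern exercised by the tests above, in normal use (assuming the public
# openai/clip-vit-base-patch32 checkpoint):
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # dummy black image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
# inputs carries input_ids and attention_mask from the tokenizer plus
# pixel_values from the image processor, matching model_input_names.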
653
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ : List[str] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : int ): lowercase = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: lowercase = [144, 192, 240] lowercase = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: lowercase = [96, 120, 144] lowercase = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: lowercase = [64, 80, 96] lowercase = [16, 16, 24, 48, 64, 80, 320] lowercase = 0.05 lowercase = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): lowercase = 512 lowercase = 16 lowercase = 21 lowercase = """pascal-voc-id2label.json""" else: lowercase = 1000 lowercase = """imagenet-1k-id2label.json""" lowercase = """huggingface/label-files""" lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) ) lowercase = {int(lowercase_ ): v for k, v in idalabel.items()} lowercase = idalabel lowercase = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ): for i in range(1 , 6 ): if F"""layer_{i}.""" in name: lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: lowercase = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: lowercase = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: lowercase = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: lowercase = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: lowercase = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: lowercase = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." in name: lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: lowercase = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." 
in name: lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: lowercase = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: lowercase = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: lowercase = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: lowercase = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." not in name): lowercase = """mobilevit.""" + name return name def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ): if base_model: lowercase = """""" else: lowercase = """mobilevit.""" for key in orig_state_dict.copy().keys(): lowercase = orig_state_dict.pop(lowercase_ ) if key[:8] == "encoder.": lowercase = key[8:] if "qkv" in key: lowercase = key.split(""".""" ) lowercase = int(key_split[0][6:] ) - 1 lowercase = int(key_split[3] ) lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size lowercase = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: lowercase = val[:dim, :] lowercase = val[dim : dim * 2, :] lowercase = val[-dim:, :] else: lowercase = val[:dim] lowercase = val[dim : dim * 2] lowercase = val[-dim:] else: lowercase = val return orig_state_dict def SCREAMING_SNAKE_CASE ( ): lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ): lowercase = get_mobilevit_config(lowercase_ ) # load original state_dict lowercase = torch.load(lowercase_ , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval() else: lowercase = MobileViTForImageClassification(lowercase_ ).eval() lowercase = convert_state_dict(lowercase_ , lowercase_ ) model.load_state_dict(lowercase_ ) # Check outputs on an image, prepared by MobileViTImageProcessor lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowercase = model(**lowercase_ ) lowercase = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": lowercase = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, 
-5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": lowercase = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": lowercase = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase_ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowercase_ ) if push_to_hub: lowercase = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) lowercase = model_mapping[mobilevit_name] image_processor.push_to_hub(lowercase_ , organization="""apple""" ) model.push_to_hub(lowercase_ , organization="""apple""" ) if __name__ == "__main__": lowercase_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowercase_ : List[str] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
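# --- Usage sketch (my addition, not part of the script above) -----------------
# The script filename and paths below are placeholders; the original MobileViT
# weights come from Apple's ml-cvnets releases and must be downloaded first.
#
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small
#
# or, from Python, calling the entry point registered under __main__ directly:
#
#   convert_movilevit_checkpoint("mobilevit_s", "./mobilevit_s.pt", "./mobilevit-small", push_to_hub=False)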
'''simple docstring'''

from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Install the lazy module so submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
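# --- Usage sketch (my addition) ------------------------------------------------
# Assuming this file is a package __init__ (e.g. transformers/onnx/__init__.py),
# the _LazyModule above keeps the initial import cheap: each name listed in
# _import_structure only triggers a real submodule import when first accessed.
#
#   from transformers.onnx import OnnxConfig, FeaturesManager
#   print(OnnxConfig, FeaturesManager)  # .config and .features are imported here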
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCamelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]: '''simple docstring''' lowercase = parent lowercase = batch_size lowercase = num_channels lowercase = is_training lowercase = use_labels lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = num_labels lowercase = image_size lowercase = layer_depths lowercase = embed_dims def _a ( self ) -> Tuple: '''simple docstring''' lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] , self.num_labels ) lowercase = self.get_config() return config, pixel_values, labels def _a ( self ) -> int: '''simple docstring''' return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = SwiftFormerModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = self.num_labels lowercase = SwiftFormerForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) lowercase = SwiftFormerForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self ) -> Optional[Any]: '''simple docstring''' ((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs() lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = (SwiftFormerModel, 
SwiftFormerForImageClassification) if is_torch_available() else () __A = ( {'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification} if is_torch_available() else {} ) __A = False __A = False __A = False __A = False __A = False def _a ( self ) -> Dict: '''simple docstring''' lowercase = SwiftFormerModelTester(self ) lowercase = ConfigTester( self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _a ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def _a ( self ) -> List[str]: '''simple docstring''' pass def _a ( self ) -> Dict: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCAmelCase ) lowercase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) ) def _a ( self ) -> int: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCAmelCase ) lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase = [*signature.parameters.keys()] lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def _a ( self ) -> List[str]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def _a ( self ) -> Any: '''simple docstring''' for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def _a ( self ) -> Optional[Any]: '''simple docstring''' pass def _a ( self ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase = outputs.hidden_states lowercase = 8 self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(_lowerCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase ) def _a ( self ) -> Dict: '''simple docstring''' def _config_zero_init(_lowerCAmelCase ): lowercase = copy.deepcopy(_lowerCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 ) if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ): lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return configs_no_init lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = _config_zero_init(_lowerCAmelCase ) for model_class in self.all_model_classes: lowercase = model_class(config=_lowerCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a ( self ) -> Any: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( ): lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __UpperCamelCase (unittest.TestCase ): @cached_property def _a ( self ) -> List[str]: '''simple docstring''' return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase ) lowercase = self.default_image_processor lowercase = prepare_img() lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase = model(**_lowerCAmelCase ) # verify the logits lowercase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
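# --- Running these tests (my addition) ------------------------------------------
# Assuming the standard transformers repository layout, the suite above is driven
# by pytest, e.g.:
#
#   pytest tests/models/swiftformer/test_modeling_swiftformer.py -k "hidden_states"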
'''simple docstring'''

import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the worldwide COVID-19 statistics from worldometers into a dict."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f"{key}\n{value}\n")
'''simple docstring'''

import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) pair of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based position (index1, index2) of the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message` with the Bifid cipher (spaces dropped, "j" folded into "i")."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Write row indices into the first row and column indices into the second.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Read the table row by row and regroup into fresh (row, column) pairs.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Invert encode(): split the coordinate stream back into rows and columns."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
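# --- Round-trip sketch (my addition; "BifidCipher" is my name for the class,
# since the file implements the Bifid cipher over a Polybius square) -----------
if __name__ == "__main__":
    cipher = BifidCipher()
    secret = cipher.encode("testmessage")
    print(secret)                 # scrambled 11-letter ciphertext
    print(cipher.decode(secret))  # -> "testmessage"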
'''simple docstring'''

# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow info/warning logs

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : int = {'''vocab_file''': '''spm_char.model'''} lowercase_ : int = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } lowercase_ : Optional[Any] = { '''microsoft/speecht5_asr''': 1024, '''microsoft/speecht5_tts''': 1024, '''microsoft/speecht5_vc''': 1024, } class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: '''simple docstring''' lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) lowercase = vocab_file lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @property def _a ( self ) -> List[Any]: '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self ) -> str: '''simple docstring''' lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Union[str, Any]: '''simple docstring''' lowercase = self.__dict__.copy() lowercase = None return state def __setstate__( self , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' lowercase = self.sp_model.IdToPiece(_lowerCAmelCase ) return token def _a ( self , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = [] lowercase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowercase = [] else: current_sub_tokens.append(_lowerCAmelCase ) out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = 
None , _lowerCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) lowercase = [1] if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + suffix_ones return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , """wb""" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
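# --- Usage sketch (my addition) ------------------------------------------------
# In the upstream transformers library the class defined above is
# SpeechT5Tokenizer; loading one of the checkpoints from
# PRETRAINED_VOCAB_FILES_MAP looks like this (network access assumed):
#
#   from transformers import SpeechT5Tokenizer
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   ids = tokenizer("hello world").input_ids  # char-level SentencePiece ids, ends with </s>
#   print(tokenizer.decode(ids))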
'''simple docstring'''

from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
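# --- Instantiation sketch (my addition) -----------------------------------------
# Inside the transformers package this config is constructed like any other:
#
#   from transformers import NezhaConfig
#   config = NezhaConfig()                                     # all defaults as declared above
#   print(config.model_type, config.max_relative_position)     # nezha 64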
'''simple docstring'''


def solution() -> int:
    """Return the product of the digits d_1, d_10, ..., d_1000000 of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[9_9999])
        * int(constant[99_9999])
    )


if __name__ == "__main__":
    print(solution())
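# --- Quick check of the indexing convention (my addition) ------------------------
#
#   >>> "".join(str(i) for i in range(1, 20))[9]
#   '1'
#
# i.e. constant[9] is the 10th digit of 0.123456789101112..., the leading "1"
# of 10, which is exactly what solution() samples at indices 0, 9, 99, ...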
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin lowercase_ : Tuple = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class __UpperCamelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=16 , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=14 , _lowerCAmelCase=10 , _lowerCAmelCase=19 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=True , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=[1, 2, 3, 4, 5] , _lowerCAmelCase=25 , _lowerCAmelCase=5 , ) -> Tuple: '''simple docstring''' lowercase = d_model lowercase = parent lowercase = batch_size lowercase = prediction_length lowercase = context_length lowercase = cardinality lowercase = num_time_features lowercase = lags_sequence lowercase = embedding_dimension lowercase = is_training lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = context_length lowercase = prediction_length + label_length lowercase = label_length lowercase = moving_average lowercase = autocorrelation_factor def _a ( self ) -> Tuple: '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _a ( self , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = config.context_length + max(config.lags_sequence ) lowercase = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) lowercase = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) lowercase = floats_tensor([self.batch_size, _past_length] ) lowercase = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs lowercase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) lowercase = floats_tensor([self.batch_size, config.prediction_length] ) lowercase = { """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def _a ( 
self ) -> Any: '''simple docstring''' lowercase = self.get_config() lowercase = self.prepare_autoformer_inputs_dict(_lowerCAmelCase ) return config, inputs_dict def _a ( self ) -> Tuple: '''simple docstring''' lowercase , lowercase = self.prepare_config_and_inputs() return config, inputs_dict def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: '''simple docstring''' lowercase = AutoformerModel(config=_lowerCAmelCase ).to(_lowerCAmelCase ).eval() lowercase = model(**_lowerCAmelCase ) lowercase = outputs.encoder_last_hidden_state lowercase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: lowercase = model.get_encoder() encoder.save_pretrained(_lowerCAmelCase ) lowercase = AutoformerEncoder.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase ) lowercase , lowercase , lowercase , lowercase , lowercase = model.create_network_inputs(**_lowerCAmelCase ) lowercase , lowercase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) lowercase = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) lowercase = encoder(inputs_embeds=_lowerCAmelCase )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) lowercase = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) lowercase = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) lowercase = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) lowercase = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase = model.get_decoder() decoder.save_pretrained(_lowerCAmelCase ) lowercase = AutoformerDecoder.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase ) lowercase = decoder( trend=_lowerCAmelCase , inputs_embeds=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () __A = (AutoformerForPrediction,) if is_torch_available() else () __A = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {} __A = False __A = False __A = False __A = False __A = False __A = False def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = AutoformerModelTester(self ) lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase ) def _a ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def _a ( self ) -> str: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: lowercase = model_class(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ) lowercase , lowercase = model_class.from_pretrained(_lowerCAmelCase , output_loading_info=_lowerCAmelCase ) self.assertEqual(info["""missing_keys"""] , [] ) def _a ( self ) -> int: '''simple docstring''' 
lowercase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_lowerCAmelCase ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def _a ( self ) -> Union[str, Any]: '''simple docstring''' pass def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = inspect.signature(getattr(_lowerCAmelCase , """forward""" ) ) # The main input is the name of the argument after `self` lowercase = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , _lowerCAmelCase ) def _a ( self ) -> List[str]: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCAmelCase ) lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase = [*signature.parameters.keys()] lowercase = [ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(_lowerCAmelCase )] , _lowerCAmelCase ) def _a ( self ) -> Any: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = True lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase ) lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase ) lowercase = getattr(self.model_tester , """d_model""" , _lowerCAmelCase ) lowercase = getattr(self.model_tester , """num_attention_heads""" , _lowerCAmelCase ) lowercase = d_model // num_attention_heads for model_class in self.all_model_classes: lowercase = True lowercase = False lowercase = True lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase = True lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase = outputs.encoder_attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) lowercase = len(_lowerCAmelCase ) lowercase = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) # decoder attentions lowercase = outputs.decoder_attentions self.assertIsInstance(_lowerCAmelCase , (list, tuple) ) self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions lowercase = outputs.cross_attentions self.assertIsInstance(_lowerCAmelCase , (list, tuple) ) self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine lowercase = True lowercase = True lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) self.assertEqual(out_len + 2 , len(_lowerCAmelCase ) ) lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _a ( self ) -> Any: '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def SCREAMING_SNAKE_CASE ( lowercase_ : str="train-batch.pt" ): lowercase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowercase_ , repo_type="""dataset""" ) lowercase = torch.load(lowercase_ , map_location=lowercase_ ) return batch @require_torch @slow class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> int: '''simple docstring''' lowercase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowerCAmelCase ) lowercase = prepare_batch() with torch.no_grad(): lowercase = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0] lowercase = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) lowercase = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_lowerCAmelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowerCAmelCase ) lowercase = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): lowercase = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state lowercase = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , _lowerCAmelCase ) lowercase = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_lowerCAmelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , 
_lowerCAmelCase , atol=_lowerCAmelCase ) ) def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowerCAmelCase ) lowercase = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): lowercase = model.generate( static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , ) lowercase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , _lowerCAmelCase ) lowercase = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_lowerCAmelCase ) lowercase = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _lowerCAmelCase , rtol=1E-1 ) )
'''simple docstring'''

import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
'''simple docstring'''

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a PyTorch BertModel into a TF1-style checkpoint under ckpt_dir."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Map a PyTorch parameter name onto the original TF variable name.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
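# --- Example invocation (my addition; script name and paths are placeholders) ---
# The script relies on TF1-style graph APIs (tf.Session, tf.get_variable), so it
# needs TensorFlow 1.x or the tf.compat.v1 shims.
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt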
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = StableDiffusionPanoramaPipeline __A = TEXT_TO_IMAGE_PARAMS __A = TEXT_TO_IMAGE_BATCH_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS def _a ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowercase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowercase = DDIMScheduler() torch.manual_seed(0 ) lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase = CLIPTextModel(_lowerCAmelCase ) lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, # Setting height and width to None to prevent OOMs on CPU. 
"""height""": None, """width""": None, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a ( self ) -> int: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _a ( self ) -> str: '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = """french fries""" lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Tuple: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Any: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Dict: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = PNDMScheduler( beta_start=0.0_0085 , 
beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ 0.3696_8392, 0.2702_5372, 0.3244_6766, 0.2837_9387, 0.3636_3274, 0.3073_3347, 0.2710_0027, 0.2705_4125, 0.2553_6096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def _a ( self ) -> str: '''simple docstring''' lowercase = StableDiffusionPanoramaPipeline.from_pretrained( """stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase ) lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _a ( self ) -> Any: '''simple docstring''' lowercase = 0 def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None: lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1868_1869, 0.3390_7816, 0.536_1276, 0.1443_2865, -0.0285_6611, -0.7394_1123, 0.2339_7987, 0.4732_2682, -0.3782_3164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1853_9645, 0.3398_7248, 0.537_8559, 0.1443_7142, -0.0245_5261, -0.733_8317, 0.2399_0755, 0.4735_6272, -0.378_6505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowercase = False lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = 
DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _a ( self ) -> int: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ) lowercase = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
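A condensed sketch of the peak-VRAM check the final test above performs; PyTorch only, with the pipeline call elided (the 5.5 GB budget mirrors the test's own assertion):

import torch

torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
# ... run the panorama pipeline under test here ...
mem_bytes = torch.cuda.max_memory_allocated()  # peak bytes since the reset
assert mem_bytes < 5.5 * 10**9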
653
1
"""Baconian cipher: every letter maps to a five-symbol string of A's and B's."""

encode_dict = {
    "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA",
    "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA",
    "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB",
    "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA",
    "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA",
    "z": "BABBB", " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a string of letters and spaces into Baconian cipher text."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode Baconian cipher text; rejects symbols other than A, B and space."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
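A minimal round-trip sketch for the cipher module above (assuming the `encode`/`decode` names from the cleaned-up version):

message = "hello world"
secret = encode(message)            # five A/B symbols per letter; spaces survive
assert decode(secret) == message    # every code is distinct, so decoding is exact
print(secret)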
700
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) lowercase_ : Tuple = logging.getLogger(__name__) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) __A = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) __A = field( default=1024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field( default=128 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) __A = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) __A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} ) __A = field(default=-1 , metadata={'''help''': '''# test examples. 
-1 means use all.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , ) def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ): logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() check_output_dir(lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowercase_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(lowercase_ , lowercase_ , lowercase_ ): assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) ) lowercase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowercase_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: lowercase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase_ , lowercase_ ): lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) lowercase = SeqaSeqDataset # Get datasets lowercase = ( dataset_class( lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) lowercase = ( dataset_class( lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) lowercase = ( dataset_class( lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer lowercase = ( build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None ) lowercase = SeqaSeqTrainer( model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator( lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) lowercase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) lowercase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) lowercase = train_result.metrics lowercase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , lowercase_ , 
training_args.output_dir ) all_metrics.update(lowercase_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowercase = trainer.evaluate(metric_key_prefix="""val""" ) lowercase = data_args.n_val lowercase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" ) lowercase = test_output.metrics lowercase = data_args.n_test if trainer.is_world_process_zero(): lowercase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.predict_with_generate: lowercase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) lowercase = lmap(str.strip , lowercase_ ) write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
653
0
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations from `array` that sum to `target` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoised over the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative bottom-up DP over targets 0..target."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
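As a quick sanity check, all three implementations above agree on the `__main__` example: with array = [1, 2, 5] and target = 5 the nine ordered combinations are 1+1+1+1+1, the four orderings of 1+1+1+2, the three orderings of 1+2+2, and 5 itself:

assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9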
701
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowercase_ : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class __UpperCamelCase (_UpperCAmelCase ): def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: '''simple docstring''' super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str: '''simple docstring''' lowercase = {} lowercase = {} if prompt is not None: lowercase = prompt if generate_kwargs is not None: lowercase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any: '''simple docstring''' return super().__call__(_lowerCAmelCase , **_lowerCAmelCase ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]: '''simple docstring''' lowercase = load_image(_lowerCAmelCase ) if prompt is not None: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise ValueError( F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. 
""" """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase = self.model.config.model_type if model_type == "git": lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids lowercase = [self.tokenizer.cls_token_id] + input_ids lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(_lowerCAmelCase ) else: raise ValueError(F"""Model type {model_type} does not support conditional text generation""" ) else: lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase = None return model_inputs def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]: '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase = None if generate_kwargs is None: lowercase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase = model_inputs.pop(self.model.main_input_name ) lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase ) return model_outputs def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' lowercase = [] for output_ids in model_outputs: lowercase = { """generated_text""": self.tokenizer.decode( _lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , ) } records.append(_lowerCAmelCase ) return records
653
0
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class __UpperCamelCase (snake_case__ ): __A = 42 __A = None def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : Optional[Any]=0.999 , lowercase_ : List[Any]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(lowercase_ : Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowercase_ : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) lowercase = [] for i in range(_SCREAMING_SNAKE_CASE ): lowercase = i / num_diffusion_timesteps lowercase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class __UpperCamelCase (snake_case__ , snake_case__ ): @register_to_config def __init__( self , _lowerCAmelCase = 1000 , _lowerCAmelCase = "fixed_small_log" , _lowerCAmelCase = True , _lowerCAmelCase = 1.0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = "squaredcos_cap_v2" , ) -> Optional[int]: '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" ) lowercase = betas_for_alpha_bar(UpperCAmelCase_ ) lowercase = 1.0 - self.betas lowercase = torch.cumprod(self.alphas , dim=0 ) lowercase = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowercase = 1.0 # setable values lowercase = None lowercase = torch.from_numpy(np.arange(0 , UpperCAmelCase_ )[::-1].copy() ) lowercase = variance_type def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> torch.FloatTensor: '''simple docstring''' return sample def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple: '''simple docstring''' lowercase = num_inference_steps lowercase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowercase = (np.arange(0 , UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowercase = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str: '''simple docstring''' if prev_timestep is None: lowercase = t - 1 lowercase = self.alphas_cumprod[t] lowercase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowercase = 1 - alpha_prod_t lowercase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowercase = self.betas[t] else: lowercase = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample lowercase = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: lowercase = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowercase = torch.log(torch.clamp(UpperCAmelCase_ , min=1E-20 ) ) lowercase = torch.exp(0.5 * variance ) 
elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowercase = variance.log() lowercase = beta.log() lowercase = (predicted_variance + 1) / 2 lowercase = frac * max_log + (1 - frac) * min_log return variance def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase=None , _lowerCAmelCase = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]: '''simple docstring''' lowercase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowercase , lowercase = torch.split(UpperCAmelCase_ , sample.shape[1] , dim=1 ) else: lowercase = None # 1. compute alphas, betas if prev_timestep is None: lowercase = t - 1 lowercase = self.alphas_cumprod[t] lowercase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowercase = 1 - alpha_prod_t lowercase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowercase = self.betas[t] lowercase = self.alphas[t] else: lowercase = 1 - alpha_prod_t / alpha_prod_t_prev lowercase = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowercase = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" """ for the UnCLIPScheduler.""" ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowercase = torch.clamp( UpperCAmelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowercase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowercase = 0 if t > 0: lowercase = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase_ , device=model_output.device ) lowercase = self._get_variance( UpperCAmelCase_ , predicted_variance=UpperCAmelCase_ , prev_timestep=UpperCAmelCase_ , ) if self.variance_type == "fixed_small_log": lowercase = variance elif self.variance_type == "learned_range": lowercase = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" """ for the UnCLIPScheduler.""" ) lowercase = variance * variance_noise lowercase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase_ , pred_original_sample=UpperCAmelCase_ ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> torch.FloatTensor: '''simple docstring''' lowercase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowercase = timesteps.to(original_samples.device ) lowercase = alphas_cumprod[timesteps] ** 0.5 lowercase = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowercase = sqrt_alpha_prod.unsqueeze(-1 ) lowercase = (1 - alphas_cumprod[timesteps]) ** 0.5 lowercase = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowercase = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowercase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
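The `betas_for_alpha_bar` helper above discretizes the cosine schedule alpha_bar(t) = cos^2((t + 0.008) / 1.008 * pi / 2); a standalone NumPy sketch of the same computation (names hypothetical):

import math

import numpy as np


def cosine_betas(num_steps: int, max_beta: float = 0.999) -> np.ndarray:
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped for stability
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return np.array(
        [min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta) for i in range(num_steps)],
        dtype=np.float32,
    )


print(cosine_betas(1000)[[0, -1]])  # tiny at t=0, near max_beta at t=T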
702
"""NEZHA model configuration."""

from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
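A brief usage sketch for the configuration class above (class and field names as restored; the values shown are the defaults, so the overrides are illustrative):

config = NezhaConfig(vocab_size=21128, max_relative_position=64)
print(config.model_type)      # "nezha"
print(config.layer_norm_eps)  # 1e-12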
653
0
"""Nearest-neighbour search over a dataset of vectors, by Euclidean distance."""

from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in value_array, return the closest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
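A small worked example for the search above (function names as restored); the query is nearest to [1.0, 1.0] at distance sqrt(0.05) ≈ 0.224:

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.9, 1.2]])
print(similarity_search(dataset, queries))  # [[[1.0, 1.0], 0.2236...]]
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.7071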
703
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) lowercase_ : Tuple = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): lowercase = git.Repo(search_parent_directories=lowercase_ ) lowercase = { """repo_id""": str(lowercase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), } with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f: json.dump(lowercase_ , lowercase_ , indent=4 ) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): if params.n_gpu <= 0: lowercase = 0 lowercase = -1 lowercase = True lowercase = False return assert torch.cuda.is_available() logger.info("""Initializing GPUs""" ) if params.n_gpu > 1: assert params.local_rank != -1 lowercase = int(os.environ["""WORLD_SIZE"""] ) lowercase = int(os.environ["""N_GPU_NODE"""] ) lowercase = int(os.environ["""RANK"""] ) # number of nodes / node ID lowercase = params.world_size // params.n_gpu_per_node lowercase = params.global_rank // params.n_gpu_per_node lowercase = True assert params.n_nodes == int(os.environ["""N_NODES"""] ) assert params.node_id == int(os.environ["""NODE_RANK"""] ) # local job (single GPU) else: assert params.local_rank == -1 lowercase = 1 lowercase = 0 lowercase = 0 lowercase = 0 lowercase = 1 lowercase = 1 lowercase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowercase = params.node_id == 0 and params.local_rank == 0 lowercase = params.n_nodes > 1 # summary lowercase = F"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes ) logger.info(PREFIX + """Node ID : %i""" % params.node_id ) logger.info(PREFIX + """Local rank : %i""" % params.local_rank ) logger.info(PREFIX + """World size : %i""" % params.world_size ) logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node ) logger.info(PREFIX + """Master : %s""" % str(params.is_master ) ) logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) ) logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) ) logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("""Initializing PyTorch distributed""" ) torch.distributed.init_process_group( init_method="""env://""" , backend="""nccl""" , ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
653
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase_ : List[str] = logging.get_logger(__name__) class __UpperCamelCase (_UpperCAmelCase ): __A = ['''pixel_values'''] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: '''simple docstring''' super().__init__(**UpperCamelCase_ ) lowercase = size if size is not None else {'shortest_edge': 384} lowercase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) lowercase = do_resize lowercase = size # Default value set here for backwards compatibility where the value in config is None lowercase = crop_pct if crop_pct is not None else 224 / 256 lowercase = resample lowercase = do_rescale lowercase = rescale_factor lowercase = do_normalize lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: '''simple docstring''' lowercase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}""" ) lowercase = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowercase = int(shortest_edge / crop_pct ) lowercase = get_resize_output_image_size(UpperCamelCase_ , size=UpperCamelCase_ , default_to_square=UpperCamelCase_ ) lowercase = resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=UpperCamelCase_ , size=(shortest_edge, shortest_edge) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( UpperCamelCase_ , size=(shortest_edge, shortest_edge) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> Dict: '''simple docstring''' return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' lowercase = do_resize if do_resize is not None else self.do_resize lowercase = crop_pct if crop_pct is not None else self.crop_pct lowercase = resample if resample is not None else self.resample lowercase = do_rescale if do_rescale is not None else self.do_rescale lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase = do_normalize if do_normalize is not None else self.do_normalize lowercase = image_mean if image_mean is not None else self.image_mean lowercase = image_std if image_std is not None else self.image_std lowercase = size if size is not None else self.size lowercase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) lowercase = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
lowercase = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: lowercase = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , crop_pct=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_rescale: lowercase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: lowercase = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] lowercase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] lowercase = {'pixel_values': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
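The resize path above upscales the shortest edge by 1 / crop_pct before centre-cropping whenever the target is under 384; the size arithmetic, spelled out (illustrative values only):

shortest_edge, crop_pct = 224, 224 / 256
resize_shortest_edge = int(shortest_edge / crop_pct)  # -> 256
# The shortest side is resized to 256 with aspect ratio kept, then a
# 224x224 centre crop is taken; at sizes >= 384 the image is instead
# warped straight to (shortest_edge, shortest_edge) with no crop.
print(resize_shortest_edge)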
704
"""Fetch information about the authenticated GitHub user via the REST API."""

from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Call the /user endpoint with a personal access token and return the JSON body."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
653
0
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase (lowercase__ , unittest.TestCase ): __A = DiTPipeline __A = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __A = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } __A = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __A = False def _a ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) lowercase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=__lowerCamelCase , ) lowercase = AutoencoderKL() lowercase = DDIMScheduler() lowercase = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> str: '''simple docstring''' if str(__lowerCamelCase ).startswith("""mps""" ): lowercase = torch.manual_seed(__lowerCamelCase ) else: lowercase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowercase = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = "cpu" lowercase = self.get_dummy_components() lowercase = self.pipeline_class(**__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowercase = self.get_dummy_inputs(__lowerCamelCase ) lowercase = pipe(**__lowerCamelCase ).images lowercase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowercase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) lowercase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__lowerCamelCase , 1E-3 ) def _a ( self ) -> Optional[Any]: '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=__lowerCamelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _a ( self ) -> Dict: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self ) -> List[str]: '''simple docstring''' lowercase = torch.manual_seed(0 ) lowercase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) lowercase = ["vase", "umbrella", "white shark", "white wolf"] lowercase = pipe.get_label_ids(__lowerCamelCase ) lowercase = pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in 
zip(__lowerCamelCase , __lowerCamelCase ): lowercase = load_numpy( F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-2 def _a ( self ) -> int: '''simple docstring''' lowercase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) lowercase = ["vase", "umbrella"] lowercase = pipe.get_label_ids(__lowerCamelCase ) lowercase = torch.manual_seed(0 ) lowercase = pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(__lowerCamelCase , __lowerCamelCase ): lowercase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" F"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
705
'''simple docstring''' import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ : Union[str, Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ): lowercase = int(round(sample_rate * max_length ) ) if len(lowercase_ ) <= sample_length: return wav lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class __UpperCamelCase : __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} ) __A = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) __A = field( default='''validation''' , metadata={ '''help''': ( '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) __A = field( default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , ) __A = field( default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. 
Defaults to \'label\''''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) __A = field( default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , ) @dataclass class __UpperCamelCase : __A = field( default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} ) __A = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def _a ( self ) -> List[Any]: '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase = training_args.get_process_log_level() logger.setLevel(lowercase_ ) transformers.utils.logging.set_verbosity(lowercase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. lowercase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. lowercase = DatasetDict() lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ """Make sure to set `--audio_column_name` to the correct audio column - one of """ F"""{', '.join(raw_datasets['train'].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. 
""" """Make sure to set `--label_column_name` to the correct text column - one of """ F"""{', '.join(raw_datasets['train'].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy lowercase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. lowercase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) lowercase = feature_extractor.model_input_names[0] def train_transforms(lowercase_ : int ): lowercase = [] for audio in batch[data_args.audio_column_name]: lowercase = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(lowercase_ ) lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate ) lowercase = {model_input_name: inputs.get(lowercase_ )} lowercase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(lowercase_ : Dict ): lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]] lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate ) lowercase = {model_input_name: inputs.get(lowercase_ )} lowercase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names lowercase , lowercase = {}, {} for i, label in enumerate(lowercase_ ): lowercase = str(lowercase_ ) lowercase = label # Load the accuracy metric from the datasets package lowercase = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(lowercase_ : Tuple ): lowercase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids ) lowercase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: lowercase = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ ) if training_args.do_eval: if data_args.max_eval_samples is not None: lowercase = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ ) # Initialize our trainer lowercase = Trainer( model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) # Training if training_args.do_train: lowercase = None if training_args.resume_from_checkpoint is not None: lowercase = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase = last_checkpoint lowercase = trainer.train(resume_from_checkpoint=lowercase_ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase_ ) trainer.save_metrics("""eval""" , lowercase_ ) # Write model card and (optionally) push to hub lowercase = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase_ ) else: trainer.create_model_card(**lowercase_ ) if __name__ == "__main__": main()
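A standalone sketch of the `random_subsample` crop defined at the top of this script, with NumPy standing in for the decoded waveform (seeded so the offset is repeatable):

import numpy as np

rng = np.random.default_rng(0)
wav = rng.standard_normal(16_000 * 30)     # fake 30 s clip at 16 kHz
sample_length = int(round(16_000 * 20.0))  # max_length_seconds = 20
if len(wav) > sample_length:               # mirror the random crop
    offset = int(rng.integers(0, len(wav) - sample_length - 1))
    wav = wav[offset : offset + sample_length]
print(wav.shape)                           # (320000,)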
653
0
'''simple docstring'''
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
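# A minimal usage sketch for the minifier above. The script filename and the
# directory names are hypothetical placeholders, not paths from this dataset:
#
#   python minify.py wmt_en_ro wmt_en_ro_mini 32
#
# keeps the first 32 lines of every file in wmt_en_ro. The same entry point
# can be driven programmatically, assuming the module is saved as minify.py:
#
#   from minify import minify
#   minify("wmt_en_ro", "wmt_en_ro_mini", 32)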
706
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowercase_ : Union[str, Any] = logging.get_logger(__name__) @dataclass class __UpperCamelCase (_UpperCAmelCase ): __A = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self , **_lowerCAmelCase ) -> Optional[int]: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase = deprecated_arg[3:] lowercase = not kwargs.pop(_lowerCAmelCase ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) lowercase = kwargs.pop("""tpu_name""" , self.tpu_name ) lowercase = kwargs.pop("""device_idx""" , self.device_idx ) lowercase = kwargs.pop("""eager_mode""" , self.eager_mode ) lowercase = kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**_lowerCAmelCase ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Name of TPU'''} , ) __A = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ["""tf"""] ) lowercase = None if self.tpu: try: if self.tpu_name: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: lowercase = None return tpu @cached_property def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) lowercase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" ) return strategy @property def _a ( self ) -> bool: '''simple docstring''' requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self ) -> "tf.distribute.Strategy": '''simple docstring''' requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self ) -> Tuple: '''simple docstring''' requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self ) -> int: '''simple docstring''' requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self ) -> bool: '''simple docstring''' return self.n_gpu > 0
653
0
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCamelCase (UpperCAmelCase_ , unittest.TestCase ): __A = TextToVideoSDPipeline __A = TEXT_TO_IMAGE_PARAMS __A = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __A = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _a ( self ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) lowercase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) lowercase = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) torch.manual_seed(0 ) lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) lowercase = CLIPTextModel(_lowercase ) lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> int: '''simple docstring''' if str(_lowercase ).startswith("""mps""" ): lowercase = torch.manual_seed(_lowercase ) else: lowercase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) lowercase = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = TextToVideoSDPipeline(**_lowercase ) lowercase = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) lowercase = self.get_dummy_inputs(_lowercase ) lowercase = 'np' lowercase = sd_pipe(**_lowercase ).frames lowercase = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) lowercase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Optional[int]: '''simple docstring''' 
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _a ( self ) -> Any: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=1E-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def _a ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def _a ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def _a ( self ) -> Optional[int]: '''simple docstring''' pass def _a ( self ) -> Union[str, Any]: '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> int: '''simple docstring''' lowercase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) lowercase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowercase = pipe.to("""cuda""" ) lowercase = 'Spiderman is surfing' lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase = pipe(_lowercase , generator=_lowercase , num_inference_steps=25 , output_type="""pt""" ).frames lowercase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def _a ( self ) -> int: '''simple docstring''' lowercase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) lowercase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) lowercase = pipe.to("""cuda""" ) lowercase = 'Spiderman is surfing' lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase = pipe(_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""pt""" ).frames lowercase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
707
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
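# A short usage sketch for the config above (a sketch, not part of this file;
# it assumes the transformers package with the ViT MSN model classes is
# installed, and builds a randomly initialized model from default values):
#
#   from transformers import ViTMSNConfig, ViTMSNModel
#
#   config = ViTMSNConfig(image_size=224, patch_size=16)  # defaults shown explicitly
#   model = ViTMSNModel(config)                           # randomly initialized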
653
0
'''simple docstring''' from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer lowercase_ : int = logging.get_logger(__name__) lowercase_ : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowercase_ : List[Any] = { '''vocab_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json''' }, '''merges_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt''' }, } lowercase_ : Tuple = {'''allegro/herbert-base-cased''': 514} lowercase_ : int = {} class __UpperCamelCase (__A ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_INIT_CONFIGURATION __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = HerbertTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase="</s>" , **_lowerCAmelCase , ) -> Dict: '''simple docstring''' super().__init__( _lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , **_lowerCAmelCase , ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: '''simple docstring''' lowercase = [self.cls_token_id] lowercase = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: '''simple docstring''' lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
708
'''simple docstring'''


def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
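# A quick worked check for the S-DES helpers above (the bit strings are
# arbitrary test values, not from any specification):
#
#   apply_table("1010000010", [3, 5, 2, 7, 4, 10, 1, 9, 8, 6])  # P10 -> "1000001100"
#
#   # IP followed by IP^-1 is the identity permutation:
#   IP, IP_inv = [2, 6, 3, 1, 4, 8, 5, 7], [4, 1, 3, 5, 7, 2, 8, 6]
#   assert apply_table(apply_table("11010111", IP), IP_inv) == "11010111"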
653
0
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
709
'''simple docstring''' import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration lowercase_ : int = 50_0000 lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__) lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ): lowercase = dataset.map(**lowercase_ ) @get_duration def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ): lowercase = dataset.filter(**lowercase_ ) def SCREAMING_SNAKE_CASE ( ): lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} ) lowercase = generate_example_dataset( os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ ) lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ ) def tokenize(lowercase_ : Dict ): return tokenizer(examples["""text"""] ) lowercase = map(lowercase_ ) lowercase = map(lowercase_ , batched=lowercase_ ) lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type="""numpy""" ): lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type="""pandas""" ): lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type="""torch""" , columns="""numbers""" ): lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ): lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ ) lowercase = filter(lowercase_ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(lowercase_ , """wb""" ) as f: f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
653
0
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
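# A quick sanity check for the trapezoidal rule above (a sketch; the interval,
# integrand, and tolerance are arbitrary choices, not documented guarantees).
# The area under x^2 between 0 and 3 is exactly 9:
#
#   from math import isclose
#   assert isclose(trapezoidal_area(lambda x: x * x, 0, 3, 1000), 9.0, rel_tol=1e-4)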
710
'''simple docstring''' from random import shuffle import tensorflow as tf from numpy import array def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ): lowercase = int(lowercase_ ) assert noofclusters < len(lowercase_ ) # Find out the dimensionality lowercase = len(vectors[0] ) # Will help select random centroids from among the available vectors lowercase = list(range(len(lowercase_ ) ) ) shuffle(lowercase_ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. lowercase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION lowercase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points lowercase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ ) ] ##These nodes will assign the centroid Variables the appropriate ##values lowercase = tf.placeholder("""float64""" , [dim] ) lowercase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )] ##These nodes will assign an assignment Variable the appropriate ##value lowercase = tf.placeholder("""int32""" ) lowercase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input lowercase = tf.placeholder("""float""" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors lowercase = tf.reduce_mean(lowercase_ , 0 ) ##Node for computing Euclidean distances # Placeholders for input lowercase = tf.placeholder("""float""" , [dim] ) lowercase = tf.placeholder("""float""" , [dim] ) lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input lowercase = tf.placeholder("""float""" , [noofclusters] ) lowercase = tf.argmin(lowercase_ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. lowercase = tf.initialize_all_variables() # Initialize all variables sess.run(lowercase_ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. lowercase = 100 for _ in range(lowercase_ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowercase_ ) ): lowercase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
lowercase = [ sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input lowercase = sess.run( lowercase_ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowercase_ ): # Collect all the vectors assigned to this cluster lowercase = [ vectors[i] for i in range(len(lowercase_ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location lowercase = sess.run( lowercase_ , feed_dict={mean_input: array(lowercase_ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments lowercase = sess.run(lowercase_ ) lowercase = sess.run(lowercase_ ) return centroids, assignments
653
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
711
'''simple docstring'''


def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursive top-down approach without memoisation."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Top-down approach where dp_array caches sub-problem results."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Iterative bottom-up dynamic programming over a (rows+1) x (cols+1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up dynamic programming keeping only two rows of the dp table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: the next iteration must read this row's frozen values
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
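# A small usage sketch for the functions above (the sample matrix is an
# arbitrary choice; the return value is the side length of the largest
# all-ones square):
#
#   mat = [
#       [1, 1, 0, 1],
#       [1, 1, 1, 1],
#       [0, 1, 1, 1],
#   ]
#   largest_square_area_in_matrix_bottom_up(3, 4, mat)  # -> 2 (the 2x2 square of ones)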
653
0
'''simple docstring'''
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
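# A small sketch of the normalizer's round-trip property: unscale is the exact
# inverse of scale, so chaining the two recovers the input up to float error
# (the tensor shape is an illustrative assumption):
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(4, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)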
712
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
653
0
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
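# A brief usage sketch for the helpers above (the model and image names are
# illustrative assumptions, not objects defined in this file):
#
#   device = get_device()                 # "cuda", "mps", or "cpu"
#   freeze_params(model.text_encoder)     # stop gradient updates for a submodule
#   show_image(generated_image)           # display an image without axis ticks
#   print(get_timestamp())                # e.g. "14:03:59"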
713
'''simple docstring'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
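# A usage sketch, assuming a real incoming-webhook URL has been provisioned
# (the URL below is a placeholder shape, not a live endpoint):
#
#   send_slack_message(
#       "Nightly build passed",
#       "https://hooks.slack.com/services/T000/B000/XXXX",
#   )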
653
0
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
714
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ : List[str] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : int ): lowercase = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: lowercase = [144, 192, 240] lowercase = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: lowercase = [96, 120, 144] lowercase = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: lowercase = [64, 80, 96] lowercase = [16, 16, 24, 48, 64, 80, 320] lowercase = 0.05 lowercase = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): lowercase = 512 lowercase = 16 lowercase = 21 lowercase = """pascal-voc-id2label.json""" else: lowercase = 1000 lowercase = """imagenet-1k-id2label.json""" lowercase = """huggingface/label-files""" lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) ) lowercase = {int(lowercase_ ): v for k, v in idalabel.items()} lowercase = idalabel lowercase = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ): for i in range(1 , 6 ): if F"""layer_{i}.""" in name: lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: lowercase = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: lowercase = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: lowercase = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: lowercase = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: lowercase = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: lowercase = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." in name: lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: lowercase = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." 
in name: lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: lowercase = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: lowercase = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: lowercase = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: lowercase = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." not in name): lowercase = """mobilevit.""" + name return name def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ): if base_model: lowercase = """""" else: lowercase = """mobilevit.""" for key in orig_state_dict.copy().keys(): lowercase = orig_state_dict.pop(lowercase_ ) if key[:8] == "encoder.": lowercase = key[8:] if "qkv" in key: lowercase = key.split(""".""" ) lowercase = int(key_split[0][6:] ) - 1 lowercase = int(key_split[3] ) lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size lowercase = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: lowercase = val[:dim, :] lowercase = val[dim : dim * 2, :] lowercase = val[-dim:, :] else: lowercase = val[:dim] lowercase = val[dim : dim * 2] lowercase = val[-dim:] else: lowercase = val return orig_state_dict def SCREAMING_SNAKE_CASE ( ): lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ): lowercase = get_mobilevit_config(lowercase_ ) # load original state_dict lowercase = torch.load(lowercase_ , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval() else: lowercase = MobileViTForImageClassification(lowercase_ ).eval() lowercase = convert_state_dict(lowercase_ , lowercase_ ) model.load_state_dict(lowercase_ ) # Check outputs on an image, prepared by MobileViTImageProcessor lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowercase = model(**lowercase_ ) lowercase = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": lowercase = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, 
-5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": lowercase = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": lowercase = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase_ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowercase_ ) if push_to_hub: lowercase = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) lowercase = model_mapping[mobilevit_name] image_processor.push_to_hub(lowercase_ , organization="""apple""" ) model.push_to_hub(lowercase_ , organization="""apple""" ) if __name__ == "__main__": lowercase_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowercase_ : List[str] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
653
0
'''simple docstring'''
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
715
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCamelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]: '''simple docstring''' lowercase = parent lowercase = batch_size lowercase = num_channels lowercase = is_training lowercase = use_labels lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = num_labels lowercase = image_size lowercase = layer_depths lowercase = embed_dims def _a ( self ) -> Tuple: '''simple docstring''' lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] , self.num_labels ) lowercase = self.get_config() return config, pixel_values, labels def _a ( self ) -> int: '''simple docstring''' return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = SwiftFormerModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = self.num_labels lowercase = SwiftFormerForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) lowercase = SwiftFormerForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self ) -> Optional[Any]: '''simple docstring''' ((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs() lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = (SwiftFormerModel, 
SwiftFormerForImageClassification) if is_torch_available() else () __A = ( {'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification} if is_torch_available() else {} ) __A = False __A = False __A = False __A = False __A = False def _a ( self ) -> Dict: '''simple docstring''' lowercase = SwiftFormerModelTester(self ) lowercase = ConfigTester( self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _a ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def _a ( self ) -> List[str]: '''simple docstring''' pass def _a ( self ) -> Dict: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCAmelCase ) lowercase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) ) def _a ( self ) -> int: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCAmelCase ) lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase = [*signature.parameters.keys()] lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def _a ( self ) -> List[str]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def _a ( self ) -> Any: '''simple docstring''' for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def _a ( self ) -> Optional[Any]: '''simple docstring''' pass def _a ( self ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase = outputs.hidden_states lowercase = 8 self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(_lowerCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase ) def _a ( self ) -> Dict: '''simple docstring''' def _config_zero_init(_lowerCAmelCase ): lowercase = copy.deepcopy(_lowerCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 ) if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ): lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return configs_no_init lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = _config_zero_init(_lowerCAmelCase ) for model_class in self.all_model_classes: lowercase = model_class(config=_lowerCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a ( self ) -> Any: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( ): lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __UpperCamelCase (unittest.TestCase ): @cached_property def _a ( self ) -> List[str]: '''simple docstring''' return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase ) lowercase = self.default_image_processor lowercase = prepare_img() lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase = model(**_lowerCAmelCase ) # verify the logits lowercase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
653
0
'''simple docstring'''
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        # replace each lr_lambda with a picklable wrapper instance
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
716
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def SCREAMING_SNAKE_CASE ( ): lowercase = HfArgumentParser(lowercase_ ) lowercase = parser.parse_args_into_dataclasses()[0] lowercase = TensorFlowBenchmark(args=lowercase_ ) try: lowercase = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] ) lowercase = """""" lowercase = eval(str(lowercase_ ).split(""" """ )[-1] ) lowercase = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(lowercase_ ) if len(lowercase_ ) > 0: lowercase = full_error_msg + begin_error_msg + str(lowercase_ ) raise ValueError(lowercase_ ) benchmark.run() if __name__ == "__main__": main()
653
0
'''simple docstring'''
def sum_of_series(first_term, common_diff, num_of_terms):
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
717
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


# reduce the amount of console output from TF
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
653
0
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=99 , _lowerCAmelCase=0 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase="last" , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=0 , ) -> List[str]: '''simple docstring''' lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_input_lengths lowercase = use_token_type_ids lowercase = use_labels lowercase = gelu_activation lowercase = sinusoidal_embeddings lowercase = causal lowercase = asm lowercase = n_langs lowercase = vocab_size lowercase = n_special lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = num_choices lowercase = summary_type lowercase = use_proj lowercase = scope lowercase = bos_token_id def _a ( self ) -> Tuple: '''simple docstring''' lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None if self.use_input_lengths: lowercase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase = None if self.use_token_type_ids: lowercase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase = None lowercase = None lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase = ids_tensor([self.batch_size] , 2 ).float() lowercase = ids_tensor([self.batch_size] , self.num_choices ) lowercase = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _a ( self ) -> int: '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , 
asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> str: '''simple docstring''' lowercase = XLMModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowercase = model(lowerCamelCase_ , lengths=lowerCamelCase_ , langs=lowerCamelCase_ ) lowercase = model(lowerCamelCase_ , langs=lowerCamelCase_ ) lowercase = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Tuple: '''simple docstring''' lowercase = XLMWithLMHeadModel(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowercase = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Optional[Any]: '''simple docstring''' lowercase = XLMForQuestionAnsweringSimple(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowercase = model(lowerCamelCase_ ) lowercase = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ ) lowercase = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> int: '''simple docstring''' lowercase = XLMForQuestionAnswering(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowercase = model(lowerCamelCase_ ) lowercase = model( lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , p_mask=lowerCamelCase_ , ) lowercase = model( lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , ) (lowercase ) = result_with_labels.to_tuple() lowercase = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ ) (lowercase ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def 
_a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> List[str]: '''simple docstring''' lowercase = XLMForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowercase = model(lowerCamelCase_ ) lowercase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Any: '''simple docstring''' lowercase = self.num_labels lowercase = XLMForTokenClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowercase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Optional[Any]: '''simple docstring''' lowercase = self.num_choices lowercase = XLMForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _a ( self ) -> int: '''simple docstring''' lowercase = self.prepare_config_and_inputs() ( lowercase ) = config_and_inputs lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __A = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __A = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Any: '''simple docstring''' lowercase = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ ) lowercase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ ) return inputs_dict def _a ( self ) -> int: '''simple docstring''' lowercase = XLMModelTester(self ) lowercase = ConfigTester(self , config_class=lowerCamelCase_ , emb_dim=37 ) def _a ( self ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def _a ( self ) -> int: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCamelCase_ ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase_ ) def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase_ ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCamelCase_ ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase_ ) def _a ( self ) -> Tuple: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase_ ) def _a ( self ) -> Dict: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase_ ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1 ) -> Union[str, Any]: '''simple docstring''' self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual( [isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCamelCase_ ) ) self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCamelCase_ ): # adds PAD dummy token lowercase = min_length + idx + 1 lowercase = min_length + idx + 1 lowercase = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase_ ) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1 ) -> Tuple: '''simple docstring''' self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual( [isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase_ ) , ) self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, 
iter_hidden_states in enumerate(lowerCamelCase_ ): # adds PAD dummy token lowercase = min_length + idx + 1 lowercase = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase_ ) , ) pass @slow def _a ( self ) -> Union[str, Any]: '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = XLMModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch class __UpperCamelCase (unittest.TestCase ): @slow def _a ( self ) -> Dict: '''simple docstring''' lowercase = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(lowerCamelCase_ ) lowercase = torch.tensor([[14, 447]] , dtype=torch.long , device=lowerCamelCase_ ) # the president lowercase = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase_ )
718
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : int = {'''vocab_file''': '''spm_char.model'''} lowercase_ : int = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } lowercase_ : Optional[Any] = { '''microsoft/speecht5_asr''': 1024, '''microsoft/speecht5_tts''': 1024, '''microsoft/speecht5_vc''': 1024, } class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: '''simple docstring''' lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) lowercase = vocab_file lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @property def _a ( self ) -> List[Any]: '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self ) -> str: '''simple docstring''' lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Union[str, Any]: '''simple docstring''' lowercase = self.__dict__.copy() lowercase = None return state def __setstate__( self , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' lowercase = self.sp_model.IdToPiece(_lowerCAmelCase ) return token def _a ( self , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = [] lowercase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowercase = [] else: current_sub_tokens.append(_lowerCAmelCase ) out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = 
None , _lowerCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) lowercase = [1] if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + suffix_ones return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , """wb""" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
653
0
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
719
'''simple docstring'''
def solution():
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[9_9999])
        * int(constant[99_9999])
    )


if __name__ == "__main__":
    print(solution())
653
0
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __UpperCamelCase (unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> Optional[int]: '''simple docstring''' lowercase = size if size is not None else {"""height""": 18, """width""": 18} lowercase = parent lowercase = batch_size lowercase = num_channels lowercase = image_size lowercase = min_resolution lowercase = max_resolution lowercase = do_resize lowercase = size lowercase = do_normalize lowercase = image_mean lowercase = image_std def _a ( self ) -> Optional[Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __UpperCamelCase (_a , unittest.TestCase ): __A = DPTImageProcessor if is_vision_available() else None def _a ( self ) -> Tuple: '''simple docstring''' lowercase = DPTImageProcessingTester(self ) @property def _a ( self ) -> str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _a ( self ) -> Any: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case_ , """image_mean""" ) ) self.assertTrue(hasattr(snake_case_ , """image_std""" ) ) self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) ) self.assertTrue(hasattr(snake_case_ , """do_resize""" ) ) self.assertTrue(hasattr(snake_case_ , """size""" ) ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def _a ( self ) -> int: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , Image.Image ) # Test not batched input lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowercase = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _a ( self ) -> str: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) # create random numpy 
tensors lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , np.ndarray ) # Test not batched input lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowercase = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , torch.Tensor ) # Test not batched input lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowercase = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
720
'''simple docstring'''
import os


def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
653
0
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
721
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = StableDiffusionPanoramaPipeline __A = TEXT_TO_IMAGE_PARAMS __A = TEXT_TO_IMAGE_BATCH_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS def _a ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowercase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowercase = DDIMScheduler() torch.manual_seed(0 ) lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase = CLIPTextModel(_lowerCAmelCase ) lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, # Setting height and width to None to prevent OOMs on CPU. 
"""height""": None, """width""": None, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a ( self ) -> int: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _a ( self ) -> str: '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = """french fries""" lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Tuple: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Any: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Dict: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = PNDMScheduler( beta_start=0.0_0085 , 
beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ 0.3696_8392, 0.2702_5372, 0.3244_6766, 0.2837_9387, 0.3636_3274, 0.3073_3347, 0.2710_0027, 0.2705_4125, 0.2553_6096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def _a ( self ) -> str: '''simple docstring''' lowercase = StableDiffusionPanoramaPipeline.from_pretrained( """stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase ) lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _a ( self ) -> Any: '''simple docstring''' lowercase = 0 def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None: lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1868_1869, 0.3390_7816, 0.536_1276, 0.1443_2865, -0.0285_6611, -0.7394_1123, 0.2339_7987, 0.4732_2682, -0.3782_3164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1853_9645, 0.3398_7248, 0.537_8559, 0.1443_7142, -0.0245_5261, -0.733_8317, 0.2399_0755, 0.4735_6272, -0.378_6505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowercase = False lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = 
DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _a ( self ) -> int: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ) lowercase = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
653
0
'''simple docstring''' from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class __UpperCamelCase (_lowercase , _lowercase ): __A = '''pixel_values''' __A = False __A = TimmBackboneConfig def __init__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any: '''simple docstring''' requires_backends(self , """timm""" ) super().__init__(A_ ) lowercase = config if config.backbone is None: raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" ) if config.backbone not in timm.list_models(): raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" ) if hasattr(A_ , """out_features""" ) and config.out_features is not None: raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" ) lowercase = getattr(A_ , """use_pretrained_backbone""" , A_ ) if pretrained is None: raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" ) # We just take the final layer by default. This matches the default for the transformers models. lowercase = config.out_indices if getattr(A_ , """out_indices""" , A_ ) is not None else (-1,) lowercase = timm.create_model( config.backbone , pretrained=A_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=A_ , **A_ , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
lowercase = self._backbone.return_layers lowercase = {layer["""module"""]: str(A_ ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(A_ ) @classmethod def _a ( cls , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""vision""", """timm"""] ) from ...models.timm_backbone import TimmBackboneConfig lowercase = kwargs.pop("""config""" , TimmBackboneConfig() ) lowercase = kwargs.pop("""use_timm_backbone""" , A_ ) if not use_timm: raise ValueError("""use_timm_backbone must be True for timm backbones""" ) lowercase = kwargs.pop("""num_channels""" , config.num_channels ) lowercase = kwargs.pop("""features_only""" , config.features_only ) lowercase = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone ) lowercase = kwargs.pop("""out_indices""" , config.out_indices ) lowercase = TimmBackboneConfig( backbone=A_ , num_channels=A_ , features_only=A_ , use_pretrained_backbone=A_ , out_indices=A_ , ) return super()._from_config(A_ , **A_ ) def _a ( self , _lowerCAmelCase ) -> Any: '''simple docstring''' pass def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: '''simple docstring''' lowercase = return_dict if return_dict is not None else self.config.use_return_dict lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("""Cannot output attentions for timm backbones at the moment""" ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone lowercase = self._all_layers lowercase = self._backbone(A_ , **A_ ) lowercase = self._return_layers lowercase = tuple(hidden_states[i] for i in self.out_indices ) else: lowercase = self._backbone(A_ , **A_ ) lowercase = None lowercase = tuple(A_ ) lowercase = tuple(A_ ) if hidden_states is not None else None if not return_dict: lowercase = (feature_maps,) if output_hidden_states: lowercase = output + (hidden_states,) return output return BackboneOutput(feature_maps=A_ , hidden_states=A_ , attentions=A_ )
700
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) lowercase_ : Tuple = logging.getLogger(__name__) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) __A = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) __A = field( default=1024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field( default=128 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) __A = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) __A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} ) __A = field(default=-1 , metadata={'''help''': '''# test examples. 
-1 means use all.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , ) def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ): logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() check_output_dir(lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowercase_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(lowercase_ , lowercase_ , lowercase_ ): assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) ) lowercase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowercase_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: lowercase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase_ , lowercase_ ): lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) lowercase = SeqaSeqDataset # Get datasets lowercase = ( dataset_class( lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) lowercase = ( dataset_class( lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) lowercase = ( dataset_class( lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer lowercase = ( build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None ) lowercase = SeqaSeqTrainer( model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator( lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) lowercase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) lowercase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) lowercase = train_result.metrics lowercase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , lowercase_ , 
training_args.output_dir ) all_metrics.update(lowercase_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowercase = trainer.evaluate(metric_key_prefix="""val""" ) lowercase = data_args.n_val lowercase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" ) lowercase = test_output.metrics lowercase = data_args.n_test if trainer.is_world_process_zero(): lowercase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.predict_with_generate: lowercase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) lowercase = lmap(str.strip , lowercase_ ) write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
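The train/eval/predict branches above all funnel their metrics through handle_metrics, which is defined elsewhere in the example. A minimal sketch of the behaviour those calls rely on; the name and signature are taken from the calls above, the body is an assumption, not the exact upstream helper:

import json
import logging
import os

logger = logging.getLogger(__name__)


def handle_metrics(split, metrics, output_dir):
    """Log `metrics` for `split` and persist them as `<output_dir>/<split>_results.json` (sketch)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    with open(os.path.join(output_dir, f"{split}_results.json"), "w") as f:
        json.dump(metrics, f, indent=4)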
653
0
def nand_gate(input_1: int, input_2: int) -> int:
    """Return 1 unless both binary inputs are 1 (logical NAND)."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
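Because NAND is functionally complete, every other basic gate can be derived from nand_gate alone. An illustrative sketch (not part of the original file) reusing the function above:

def not_gate(a: int) -> int:
    return nand_gate(a, a)


def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))


def or_gate(a: int, b: int) -> int:
    return nand_gate(not_gate(a), not_gate(b))


assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
assert or_gate(0, 0) == 0 and or_gate(0, 1) == 1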
701
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowercase_ : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class __UpperCamelCase (_UpperCAmelCase ): def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: '''simple docstring''' super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str: '''simple docstring''' lowercase = {} lowercase = {} if prompt is not None: lowercase = prompt if generate_kwargs is not None: lowercase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any: '''simple docstring''' return super().__call__(_lowerCAmelCase , **_lowerCAmelCase ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]: '''simple docstring''' lowercase = load_image(_lowerCAmelCase ) if prompt is not None: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise ValueError( F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. 
""" """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase = self.model.config.model_type if model_type == "git": lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids lowercase = [self.tokenizer.cls_token_id] + input_ids lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(_lowerCAmelCase ) else: raise ValueError(F"""Model type {model_type} does not support conditional text generation""" ) else: lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase = None return model_inputs def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]: '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase = None if generate_kwargs is None: lowercase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase = model_inputs.pop(self.model.main_input_name ) lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase ) return model_outputs def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' lowercase = [] for output_ids in model_outputs: lowercase = { """generated_text""": self.tokenizer.decode( _lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , ) } records.append(_lowerCAmelCase ) return records
653
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ): __A = KandinskyImgaImgPipeline __A = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] __A = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] __A = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] __A = False @property def _a ( self ) -> List[Any]: '''simple docstring''' return 32 @property def _a ( self ) -> Dict: '''simple docstring''' return 32 @property def _a ( self ) -> Optional[Any]: '''simple docstring''' return self.time_input_dim @property def _a ( self ) -> List[Any]: '''simple docstring''' return self.time_input_dim * 4 @property def _a ( self ) -> Any: '''simple docstring''' return 100 @property def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def _a ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) lowercase = MultilingualCLIP(_lowerCAmelCase ) lowercase = text_encoder.eval() return text_encoder @property def _a ( self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } lowercase = UNetaDConditionModel(**_lowerCAmelCase ) return model @property def _a ( self ) -> int: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _a ( 
self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = self.dummy_text_encoder lowercase = self.dummy_tokenizer lowercase = self.dummy_unet lowercase = self.dummy_movq lowercase = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_0085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } lowercase = DDIMScheduler(**_lowerCAmelCase ) lowercase = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> List[str]: '''simple docstring''' lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCAmelCase ) # create init_image lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) ) if str(_lowerCAmelCase ).startswith("""mps""" ): lowercase = torch.manual_seed(_lowerCAmelCase ) else: lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = """cpu""" lowercase = self.get_dummy_components() lowercase = self.pipeline_class(**_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) ) lowercase = output.images lowercase = pipe( **self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0] lowercase = image[0, -3:, -3:, -1] lowercase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array( [0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self ) -> Dict: '''simple docstring''' lowercase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) lowercase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) lowercase = """A red cartoon frog, 4k""" lowercase = KandinskyPriorPipeline.from_pretrained( 
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCAmelCase ) lowercase = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) lowercase = pipeline.to(_lowerCAmelCase ) pipeline.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase , lowercase = pipe_prior( _lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() lowercase = pipeline( _lowerCAmelCase , image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
702
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
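Illustrative instantiation of the configuration above; NezhaModel is the matching base model class in transformers:

from transformers import NezhaConfig, NezhaModel

config = NezhaConfig()               # defaults match sijunhe/nezha-cn-base
model = NezhaModel(config)           # randomly initialised weights
print(config.max_relative_position)  # 64, the NEZHA-specific relative-position cap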
653
0
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first ``n`` natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
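The same answer follows in O(1) from the closed forms sum_{i=1..n} i = n(n+1)/2 and sum_{i=1..n} i^2 = n(n+1)(2n+1)/6; a sketch for comparison with the loop-based version above:

def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(10) == 2640  # 55**2 - 385
assert solution_closed_form(100) == solution()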
703
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) lowercase_ : Tuple = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): lowercase = git.Repo(search_parent_directories=lowercase_ ) lowercase = { """repo_id""": str(lowercase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), } with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f: json.dump(lowercase_ , lowercase_ , indent=4 ) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): if params.n_gpu <= 0: lowercase = 0 lowercase = -1 lowercase = True lowercase = False return assert torch.cuda.is_available() logger.info("""Initializing GPUs""" ) if params.n_gpu > 1: assert params.local_rank != -1 lowercase = int(os.environ["""WORLD_SIZE"""] ) lowercase = int(os.environ["""N_GPU_NODE"""] ) lowercase = int(os.environ["""RANK"""] ) # number of nodes / node ID lowercase = params.world_size // params.n_gpu_per_node lowercase = params.global_rank // params.n_gpu_per_node lowercase = True assert params.n_nodes == int(os.environ["""N_NODES"""] ) assert params.node_id == int(os.environ["""NODE_RANK"""] ) # local job (single GPU) else: assert params.local_rank == -1 lowercase = 1 lowercase = 0 lowercase = 0 lowercase = 0 lowercase = 1 lowercase = 1 lowercase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowercase = params.node_id == 0 and params.local_rank == 0 lowercase = params.n_nodes > 1 # summary lowercase = F"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes ) logger.info(PREFIX + """Node ID : %i""" % params.node_id ) logger.info(PREFIX + """Local rank : %i""" % params.local_rank ) logger.info(PREFIX + """World size : %i""" % params.world_size ) logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node ) logger.info(PREFIX + """Master : %s""" % str(params.is_master ) ) logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) ) logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) ) logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("""Initializing PyTorch distributed""" ) torch.distributed.init_process_group( init_method="""env://""" , backend="""nccl""" , ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
653
0
'''simple docstring''' import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml lowercase_ : Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : bool , lowercase_ : bool ): def run_func(lowercase_ : Optional[Any] ): @wraps(_lowerCamelCase ) def run_in_eager_mode(*lowercase_ : Optional[Any] , **lowercase_ : List[str] ): return func(*_lowerCamelCase , **_lowerCamelCase ) @wraps(_lowerCamelCase ) @tf.function(experimental_compile=_lowerCamelCase ) def run_in_graph_mode(*lowercase_ : Optional[int] , **lowercase_ : List[str] ): return func(*_lowerCamelCase , **_lowerCamelCase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : int ): lowercase = random.Random() lowercase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(_lowerCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __UpperCamelCase (lowercase__ ): __A = 42 __A = 42 __A = '''TensorFlow''' @property def _a ( self ) -> List[Any]: '''simple docstring''' return tf.__version__ def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> float: '''simple docstring''' lowercase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowercase = self._prepare_inference_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return self._measure_speed(_inference ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> float: '''simple docstring''' lowercase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowercase = self._prepare_train_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return self._measure_speed(_train ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> [Memory, Optional[MemorySummary]]: '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _lowerCAmelCase ) lowercase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowercase = self._prepare_inference_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return self._measure_memory(_inference ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> [Memory, Optional[MemorySummary]]: '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _lowerCAmelCase ) lowercase = self.args.strategy if strategy is None: raise ValueError("""A 
device strategy has to be initialized before using TensorFlow.""" ) lowercase = self._prepare_train_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return self._measure_memory(_train ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Callable[[], None]: '''simple docstring''' lowercase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowercase = ( hasattr(_lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , _lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowercase = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model lowercase = __import__("""transformers""" , fromlist=[model_class] ) lowercase = getattr(_lowerCAmelCase , _lowerCAmelCase ) lowercase = model_cls(_lowerCAmelCase ) except ImportError: raise ImportError( F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowercase = TF_MODEL_MAPPING[config.__class__](_lowerCAmelCase ) # encoder-decoder has vocab size saved differently lowercase = config.vocab_size if hasattr(_lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size lowercase = random_input_ids(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase , training=_lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(_lowerCAmelCase , training=_lowerCAmelCase ) lowercase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Callable[[], None]: '''simple docstring''' lowercase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowercase = ( hasattr(_lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , _lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowercase = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model lowercase = __import__("""transformers""" , fromlist=[model_class] ) lowercase = getattr(_lowerCAmelCase , _lowerCAmelCase ) lowercase = model_cls(_lowerCAmelCase ) except ImportError: raise ImportError( F"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowercase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_lowerCAmelCase ) # encoder-decoder has vocab size saved differently lowercase = config.vocab_size if hasattr(_lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size lowercase = random_input_ids(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): lowercase = model(_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )[0] lowercase = tf.gradients(_lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )[0] lowercase = tf.gradients(_lowerCAmelCase , model.trainable_variables ) return gradients lowercase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _a ( self , _lowerCAmelCase ) -> float: '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(_lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowercase = timeit.repeat( _lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(_lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(F"""Doesn't fit on GPU. {e}""" ) def _a ( self , _lowerCAmelCase ) -> [Memory, MemorySummary]: '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) lowercase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) lowercase = "N/A" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() lowercase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowercase = nvml.nvmlDeviceGetMemoryInfo(_lowerCAmelCase ) lowercase = meminfo.used lowercase = Memory(_lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) lowercase = None else: lowercase = measure_peak_memory_cpu(_lowerCAmelCase ) lowercase = Memory(_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: lowercase = stop_memory_tracing(_lowerCAmelCase ) if memory is None: lowercase = summary.total else: lowercase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F"""Doesn't fit on GPU. {e}""" ) return "N/A", None
704
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub profile using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
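A slightly hardened variant of the call above (illustrative, reusing the endpoint constant from this file): add a request timeout and surface HTTP errors instead of silently decoding an error payload.

def fetch_github_info_safe(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    response = requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10)
    response.raise_for_status()  # fail loudly on 401/403 instead of returning an error dict
    return response.json()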
653
0
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
705
'''simple docstring''' import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ : Union[str, Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ): lowercase = int(round(sample_rate * max_length ) ) if len(lowercase_ ) <= sample_length: return wav lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class __UpperCamelCase : __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} ) __A = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) __A = field( default='''validation''' , metadata={ '''help''': ( '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) __A = field( default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , ) __A = field( default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. 
Defaults to \'label\''''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) __A = field( default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , ) @dataclass class __UpperCamelCase : __A = field( default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} ) __A = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def _a ( self ) -> List[Any]: '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase = training_args.get_process_log_level() logger.setLevel(lowercase_ ) transformers.utils.logging.set_verbosity(lowercase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. lowercase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. lowercase = DatasetDict() lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ """Make sure to set `--audio_column_name` to the correct audio column - one of """ F"""{', '.join(raw_datasets['train'].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. 
""" """Make sure to set `--label_column_name` to the correct text column - one of """ F"""{', '.join(raw_datasets['train'].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy lowercase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. lowercase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) lowercase = feature_extractor.model_input_names[0] def train_transforms(lowercase_ : int ): lowercase = [] for audio in batch[data_args.audio_column_name]: lowercase = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(lowercase_ ) lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate ) lowercase = {model_input_name: inputs.get(lowercase_ )} lowercase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(lowercase_ : Dict ): lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]] lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate ) lowercase = {model_input_name: inputs.get(lowercase_ )} lowercase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names lowercase , lowercase = {}, {} for i, label in enumerate(lowercase_ ): lowercase = str(lowercase_ ) lowercase = label # Load the accuracy metric from the datasets package lowercase = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(lowercase_ : Tuple ): lowercase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids ) lowercase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: lowercase = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ ) if training_args.do_eval: if data_args.max_eval_samples is not None: lowercase = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ ) # Initialize our trainer lowercase = Trainer( model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) # Training if training_args.do_train: lowercase = None if training_args.resume_from_checkpoint is not None: lowercase = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase = last_checkpoint lowercase = trainer.train(resume_from_checkpoint=lowercase_ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase_ ) trainer.save_metrics("""eval""" , lowercase_ ) # Write model card and (optionally) push to hub lowercase = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase_ ) else: trainer.create_model_card(**lowercase_ ) if __name__ == "__main__": main()
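A quick self-check (illustrative, not part of the script) of the clipping helper defined at the top of this file and called as random_subsample inside train_transforms: clips longer than max_length seconds are cut to exactly sample_rate * max_length samples, and shorter clips pass through unchanged.

import numpy as np

wav = np.zeros(16000 * 30, dtype=np.float32)  # 30 s of audio at 16 kHz
clip = random_subsample(wav, max_length=20.0, sample_rate=16000)
assert len(clip) == 16000 * 20                # truncated to 20 s

short = np.zeros(16000 * 5, dtype=np.float32)
assert random_subsample(short, max_length=20.0, sample_rate=16000) is short  # returned as-is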
653
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
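The point of the _LazyModule indirection above: importing the package stays cheap, and the torch-backed modeling symbols are only materialised on first attribute access. Illustrative usage, assuming a working torch install:

from transformers import LiltConfig, LiltModel  # resolved lazily on access

config = LiltConfig()
model = LiltModel(config)      # random weights; the real import happens here
print(config.model_type)       # "lilt"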
706
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowercase_ : Union[str, Any] = logging.get_logger(__name__) @dataclass class __UpperCamelCase (_UpperCAmelCase ): __A = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self , **_lowerCAmelCase ) -> Optional[int]: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase = deprecated_arg[3:] lowercase = not kwargs.pop(_lowerCAmelCase ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) lowercase = kwargs.pop("""tpu_name""" , self.tpu_name ) lowercase = kwargs.pop("""device_idx""" , self.device_idx ) lowercase = kwargs.pop("""eager_mode""" , self.eager_mode ) lowercase = kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**_lowerCAmelCase ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Name of TPU'''} , ) __A = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ["""tf"""] ) lowercase = None if self.tpu: try: if self.tpu_name: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: lowercase = None return tpu @cached_property def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) lowercase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" ) return strategy @property def _a ( self ) -> bool: '''simple docstring''' requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self ) -> "tf.distribute.Strategy": '''simple docstring''' requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self ) -> Tuple: '''simple docstring''' requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self ) -> int: '''simple docstring''' requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self ) -> bool: '''simple docstring''' return self.n_gpu > 0
653
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
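Illustrative use of the configuration above for a different Gym environment; state_dim and act_dim are the observation and action dimensionalities, and the values here are examples only:

from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
model = DecisionTransformerModel(config)  # randomly initialised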
707
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
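Illustrative: deriving a smaller ViT-MSN variant from the config above. Unset fields keep the defaults listed in __init__, and hidden_size must stay divisible by num_attention_heads:

from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig(image_size=96, patch_size=8, hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
model = ViTMSNModel(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the random model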
653
0
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase_ : Union[str, Any] = logging.get_logger(__name__) lowercase_ : Union[str, Any] = "▁" lowercase_ : str = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} lowercase_ : Tuple = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } lowercase_ : Optional[int] = {"vinai/bartpho-syllable": 1024} class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> str: '''simple docstring''' lowercase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) lowercase = vocab_file lowercase = monolingual_vocab_file lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility lowercase = {} lowercase = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(lowercase_ ) not in self.fairseq_tokens_to_ids: lowercase = cnt cnt += 1 with open(lowercase_ , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): lowercase = line.strip().split()[0] lowercase = len(self.fairseq_tokens_to_ids ) if str(lowercase_ ) not in self.fairseq_tokens_to_ids: lowercase = len(self.fairseq_tokens_to_ids ) lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.__dict__.copy() lowercase = None lowercase = self.sp_model.serialized_model_proto() return state def __setstate__( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> str: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase = [self.cls_token_id] lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> Optional[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ ) 
if token_ids_a is None: return [1] + ([0] * len(lowercase_ )) + [1] return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Optional[int]: '''simple docstring''' lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _a ( self ) -> str: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def _a ( self ) -> Any: '''simple docstring''' lowercase = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' return self.sp_model.encode(lowercase_ , out_type=lowercase_ ) def _a ( self , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _a ( self , _lowerCAmelCase ) -> Tuple: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def _a ( self , _lowerCAmelCase ) -> Any: '''simple docstring''' lowercase = """""".join(lowercase_ ).replace(lowercase_ , """ """ ).strip() return out_string def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> int: '''simple docstring''' if not os.path.isdir(lowercase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase = os.path.join( lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase = os.path.join( lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_ , """wb""" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( lowercase_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , lowercase_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(lowercase_ , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F"""{str(lowercase_ )} \n""" ) return out_vocab_file, out_monolingual_vocab_file
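A minimal round-trip with the tokenizer above; the checkpoint name comes from the pretrained map in this file and the Vietnamese sample sentence is illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
print(tokenizer.decode(ids, skip_special_tokens=True))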
708
"""Simplified DES (S-DES): a toy Feistel cipher that derives two 8-bit subkeys
from a 10-bit key and encrypts an 8-bit message block."""


def apply_table(inp: str, table: list[int]) -> str:
    """Permute the bits of ``inp`` according to the 1-indexed ``table``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    """S-box lookup: the outer bits select the row, the inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion: list[int], sa: list[list[int]], sb: list[list[int]], key: str, message: str) -> str:
    """One Feistel round: expand the right half, mix in the subkey, substitute,
    permute with P4 and XOR the result into the left half."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]  # compresses the shifted key halves to an 8-bit subkey
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]  # initial permutation of the 10-bit key
    p4_table = [2, 4, 3, 1]  # permutation applied to the S-box outputs
    IP = [2, 6, 3, 1, 4, 8, 5, 7]  # initial permutation of the message
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]  # its inverse
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]  # expands 4 bits to 8 before key mixing
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption applies the subkeys in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
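# Worked example of the key-schedule primitives above (a standalone sketch; the
# two helpers are repeated so it runs on its own). The 10-bit key 1010000010 is
# the classic textbook S-DES example: P10 permutes it to 1000001100, each half
# is rotated left once, and P8 compresses the result to the first subkey.
def apply_table(inp: str, table: list[int]) -> str:
    return "".join(inp[i - 1] for i in table)


def left_shift(data: str) -> str:
    return data[1:] + data[0]


key = "1010000010"
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p8_table = [6, 3, 7, 4, 8, 5, 10, 9]

permuted = apply_table(key, p10_table)          # "1000001100"
left, right = permuted[:5], permuted[5:]
shifted = left_shift(left) + left_shift(right)  # "0000111000"
key1 = apply_table(shifted, p8_table)
print(key1)                                     # "10100100"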
653
0
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __UpperCamelCase (a__ ): __A = (KDPMaDiscreteScheduler,) __A = 10 def _a ( self , **_lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCAmelCase__ ) return config def _a ( self ) -> Optional[int]: '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def _a ( self ) -> Optional[Any]: '''simple docstring''' for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def _a ( self ) -> Dict: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def _a ( self ) -> List[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def _a ( self ) -> Dict: '''simple docstring''' lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowercase = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) lowercase = self.dummy_model() lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma lowercase = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = output.prev_sample lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) ) lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2 assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0002 ) < 1E-3 def _a ( self ) -> Tuple: '''simple docstring''' if torch_device == "mps": return lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) lowercase = self.dummy_model() lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma lowercase = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = output.prev_sample lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) ) lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 def _a ( self ) -> Dict: '''simple docstring''' if torch_device == "mps": return lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) 
lowercase = self.dummy_model() lowercase = self.dummy_sample_deter.to(lowerCAmelCase__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = output.prev_sample lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) ) lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) ) if str(lowerCAmelCase__ ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3
709
'''simple docstring''' import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration lowercase_ : int = 50_0000 lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__) lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ): lowercase = dataset.map(**lowercase_ ) @get_duration def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ): lowercase = dataset.filter(**lowercase_ ) def SCREAMING_SNAKE_CASE ( ): lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} ) lowercase = generate_example_dataset( os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ ) lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ ) def tokenize(lowercase_ : Dict ): return tokenizer(examples["""text"""] ) lowercase = map(lowercase_ ) lowercase = map(lowercase_ , batched=lowercase_ ) lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type="""numpy""" ): lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type="""pandas""" ): lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type="""torch""" , columns="""numbers""" ): lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ): lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ ) lowercase = filter(lowercase_ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(lowercase_ , """wb""" ) as f: f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
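# The benchmark above imports `get_duration` from a local `utils` module that
# is not part of this file. A minimal decorator with the behaviour the
# benchmark relies on (returning the wrapped call's wall-clock time) could look
# like the sketch below -- an assumption, not the repository's actual helper.
import time
from functools import wraps


def get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # seconds elapsed

    return wrapper


@get_duration
def busy_wait(n: int) -> None:
    for _ in range(n):
        pass


print(f"{busy_wait(1_000_000):.4f}s")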
653
0
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def SCREAMING_SNAKE_CASE ( ): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join lowercase = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , UpperCAmelCase__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def SCREAMING_SNAKE_CASE ( ): assert _test_patching.open is open lowercase = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , UpperCAmelCase__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def SCREAMING_SNAKE_CASE ( ): lowercase = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , UpperCAmelCase__ ): pass def SCREAMING_SNAKE_CASE ( ): lowercase = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , UpperCAmelCase__ ) is None with patch_submodule(_test_patching , """len""" , UpperCAmelCase__ ): assert _test_patching.len is mock assert _test_patching.len is len def SCREAMING_SNAKE_CASE ( ): lowercase = """__test_patch_submodule_start_and_stop_mock__""" lowercase = patch_submodule(_test_patching , """open""" , UpperCAmelCase__ ) 
assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def SCREAMING_SNAKE_CASE ( ): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join lowercase = """__test_patch_submodule_successive_join__""" lowercase = """__test_patch_submodule_successive_dirname__""" lowercase = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , UpperCAmelCase__ ): with patch_submodule(_test_patching , """os.rename""" , UpperCAmelCase__ ): with patch_submodule(_test_patching , """os.path.dirname""" , UpperCAmelCase__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , UpperCAmelCase__ ): with patch_submodule(_test_patching , """os.path.join""" , UpperCAmelCase__ ): with patch_submodule(_test_patching , """os.path.dirname""" , UpperCAmelCase__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def SCREAMING_SNAKE_CASE ( ): lowercase = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , UpperCAmelCase__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , UpperCAmelCase__ ): pass
710
'''simple docstring''' from random import shuffle import tensorflow as tf from numpy import array def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ): lowercase = int(lowercase_ ) assert noofclusters < len(lowercase_ ) # Find out the dimensionality lowercase = len(vectors[0] ) # Will help select random centroids from among the available vectors lowercase = list(range(len(lowercase_ ) ) ) shuffle(lowercase_ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. lowercase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION lowercase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points lowercase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ ) ] ##These nodes will assign the centroid Variables the appropriate ##values lowercase = tf.placeholder("""float64""" , [dim] ) lowercase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )] ##These nodes will assign an assignment Variable the appropriate ##value lowercase = tf.placeholder("""int32""" ) lowercase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input lowercase = tf.placeholder("""float""" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors lowercase = tf.reduce_mean(lowercase_ , 0 ) ##Node for computing Euclidean distances # Placeholders for input lowercase = tf.placeholder("""float""" , [dim] ) lowercase = tf.placeholder("""float""" , [dim] ) lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input lowercase = tf.placeholder("""float""" , [noofclusters] ) lowercase = tf.argmin(lowercase_ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. lowercase = tf.initialize_all_variables() # Initialize all variables sess.run(lowercase_ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. lowercase = 100 for _ in range(lowercase_ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowercase_ ) ): lowercase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
lowercase = [ sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input lowercase = sess.run( lowercase_ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowercase_ ): # Collect all the vectors assigned to this cluster lowercase = [ vectors[i] for i in range(len(lowercase_ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location lowercase = sess.run( lowercase_ , feed_dict={mean_input: array(lowercase_ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments lowercase = sess.run(lowercase_ ) lowercase = sess.run(lowercase_ ) return centroids, assignments
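# The clustering above is written against the TensorFlow 1.x graph API
# (tf.Session, tf.placeholder, tf.assign), which no longer exists in TF 2.x.
# The same Expectation-Maximization loop in plain NumPy, as an illustrative
# sketch rather than a drop-in replacement:
import numpy as np


def kmeans_numpy(vectors: np.ndarray, k: int, iterations: int = 100, seed: int = 0):
    rng = np.random.default_rng(seed)
    # Initialize centroids from k distinct input vectors.
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)].astype(float)
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # Expectation: assign every vector to its nearest centroid.
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=2)
        assignments = dists.argmin(axis=1)
        # Maximization: move each centroid to the mean of its members.
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments


points = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.8]])
_, labels = kmeans_numpy(points, k=2)
print(labels)  # two clusters, e.g. [0 0 1 1] (cluster ids may be permuted)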
653
0
"""Shannon entropy of a text, estimated from single-character and
character-pair frequencies."""
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    """Count single characters and overlapping character pairs, treating the
    text as wrapped with a space at the start."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
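# Quick sanity check of the unigram entropy formula used above, done inline
# with a Counter (an illustrative snippet, independent of calculate_prob's
# rounding and alphabet filtering):
import math
from collections import Counter

text = "abracadabra"
counts = Counter(text)          # a:5, b:2, r:2, c:1, d:1
total = sum(counts.values())
entropy = -sum((n / total) * math.log2(n / total) for n in counts.values())
print(f"{entropy:.2f} bits")    # ~2.04 bits per character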
711
'''simple docstring''' def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ): def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 lowercase = update_area_of_max_square(lowercase_ , col + 1 ) lowercase = update_area_of_max_square(row + 1 , col + 1 ) lowercase = update_area_of_max_square(row + 1 , lowercase_ ) if mat[row][col]: lowercase = 1 + min([right, diagonal, down] ) lowercase = max(largest_square_area[0] , lowercase_ ) return sub_problem_sol else: return 0 lowercase = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ): def update_area_of_max_square_using_dp_array( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ ) lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ ) lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ ) if mat[row][col]: lowercase = 1 + min([right, diagonal, down] ) lowercase = max(largest_square_area[0] , lowercase_ ) lowercase = sub_problem_sol return sub_problem_sol else: return 0 lowercase = [0] lowercase = [[-1] * cols for _ in range(lowercase_ )] update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ ) return largest_square_area[0] def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ): lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )] lowercase = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): lowercase = dp_array[row][col + 1] lowercase = dp_array[row + 1][col + 1] lowercase = dp_array[row + 1][col] if mat[row][col] == 1: lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ ) lowercase = max(dp_array[row][col] , lowercase_ ) else: lowercase = 0 return largest_square_area def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ): lowercase = [0] * (cols + 1) lowercase = [0] * (cols + 1) lowercase = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): lowercase = current_row[col + 1] lowercase = next_row[col + 1] lowercase = next_row[col] if mat[row][col] == 1: lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ ) lowercase = max(current_row[col] , lowercase_ ) else: lowercase = 0 lowercase = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
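# A compact, standalone version of the bottom-up recurrence used above. Note
# that despite the "area" in the file's function names, the value tracked is
# the side length of the largest all-ones square: dp[r][c] holds the side of
# the largest square whose top-left corner is at (r, c).
def largest_square_side(mat: list[list[int]]) -> int:
    rows, cols = len(mat), len(mat[0])
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]  # one row/col of zero padding
    best = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c]:
                dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c + 1], dp[r + 1][c])
                best = max(best, dp[r][c])
    return best


print(largest_square_side([[1, 1], [1, 1]]))  # 2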
653
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ : int = logging.get_logger(__name__) lowercase_ : Optional[int] = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class __UpperCamelCase (UpperCamelCase_ ): __A = '''camembert''' def __init__( self , _lowerCAmelCase=3_0522 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase="absolute" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Optional[int]: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = hidden_act lowercase = intermediate_size lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = initializer_range lowercase = layer_norm_eps lowercase = position_embedding_type lowercase = use_cache lowercase = classifier_dropout class __UpperCamelCase (UpperCamelCase_ ): @property def _a ( self ) -> Union[str, Any]: '''simple docstring''' if self.task == "multiple-choice": lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
712
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : int = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class __UpperCamelCase (_UpperCAmelCase ): __A = '''gpt_bigcode''' __A = ['''past_key_values'''] __A = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]: '''simple docstring''' lowercase = vocab_size lowercase = n_positions lowercase = n_embd lowercase = n_layer lowercase = n_head lowercase = n_inner lowercase = activation_function lowercase = resid_pdrop lowercase = embd_pdrop lowercase = attn_pdrop lowercase = layer_norm_epsilon lowercase = initializer_range lowercase = scale_attn_weights lowercase = use_cache lowercase = attention_softmax_in_fpaa lowercase = scale_attention_softmax_in_fpaa lowercase = multi_query lowercase = bos_token_id lowercase = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
653
0
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : @staticmethod def _a ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class __UpperCamelCase (unittest.TestCase ): __A = MODEL_FOR_OBJECT_DETECTION_MAPPING def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowercase = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Any: '''simple docstring''' lowercase = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(_SCREAMING_SNAKE_CASE ) , 0 ) for detected_object in outputs: self.assertEqual( _SCREAMING_SNAKE_CASE , { """score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE ), """box""": {"""xmin""": ANY(_SCREAMING_SNAKE_CASE ), """ymin""": ANY(_SCREAMING_SNAKE_CASE ), """xmax""": ANY(_SCREAMING_SNAKE_CASE ), """ymax""": ANY(_SCREAMING_SNAKE_CASE )}, } , ) import datasets lowercase = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) lowercase = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] lowercase = object_detector(_SCREAMING_SNAKE_CASE , threshold=0.0 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for outputs in batch_outputs: self.assertGreater(len(_SCREAMING_SNAKE_CASE ) , 0 ) for detected_object in outputs: self.assertEqual( _SCREAMING_SNAKE_CASE , { """score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE ), """box""": {"""xmin""": ANY(_SCREAMING_SNAKE_CASE ), """ymin""": ANY(_SCREAMING_SNAKE_CASE ), """xmax""": ANY(_SCREAMING_SNAKE_CASE ), """ymax""": ANY(_SCREAMING_SNAKE_CASE )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def _a ( self ) -> int: '''simple docstring''' pass @require_torch def _a ( self ) -> List[str]: '''simple docstring''' lowercase = """hf-internal-testing/tiny-detr-mobilenetsv3""" lowercase = AutoModelForObjectDetection.from_pretrained(_SCREAMING_SNAKE_CASE ) lowercase = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ) lowercase = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE ) lowercase = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ] , ) lowercase = 
object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] , ) @require_torch @slow def _a ( self ) -> List[str]: '''simple docstring''' lowercase = """facebook/detr-resnet-50""" lowercase = AutoModelForObjectDetection.from_pretrained(_SCREAMING_SNAKE_CASE ) lowercase = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ) lowercase = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE ) lowercase = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) lowercase = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def _a ( self ) -> Optional[Any]: 
'''simple docstring''' lowercase = """facebook/detr-resnet-50""" lowercase = pipeline("""object-detection""" , model=_SCREAMING_SNAKE_CASE ) lowercase = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) lowercase = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def _a ( self ) -> int: '''simple docstring''' lowercase = 0.9985 lowercase = """facebook/detr-resnet-50""" lowercase = pipeline("""object-detection""" , model=_SCREAMING_SNAKE_CASE ) lowercase = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=_SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) @require_torch @require_pytesseract @slow def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = """Narsil/layoutlmv3-finetuned-funsd""" lowercase = 0.9993 lowercase = pipeline("""object-detection""" , model=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) lowercase = object_detector( 
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] , )
713
"""Send a message to a Slack channel through an incoming webhook."""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
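# A hardened variant of the helper above (a sketch, not part of the original
# file): adds a request timeout and lets requests raise a typed HTTPError
# instead of building the error message by hand.
import requests


def send_slack_message_strict(message_body: str, slack_url: str, timeout: float = 10.0) -> None:
    response = requests.post(
        slack_url,
        json={"text": message_body},  # incoming webhooks expect a JSON "text" field
        headers={"Content-Type": "application/json"},
        timeout=timeout,  # avoid hanging forever on a dead endpoint
    )
    response.raise_for_status()  # raises requests.exceptions.HTTPError on non-2xx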
653
0
"""Rat-in-maze backtracking: find a path of 0-cells from the top-left to the
bottom-right corner of a square maze (1 marks a blocked cell)."""
from __future__ import annotations


def solution(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
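# Minimal usage of the solver above (an illustrative run, not part of the
# original file): 0 marks an open cell, 1 a wall. On success the printed
# `solutions` grid marks the discovered path with 1s.
maze = [
    [0, 1, 0, 1],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
solution(maze)  # prints the path grid and returns True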
714
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ : List[str] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : int ): lowercase = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: lowercase = [144, 192, 240] lowercase = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: lowercase = [96, 120, 144] lowercase = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: lowercase = [64, 80, 96] lowercase = [16, 16, 24, 48, 64, 80, 320] lowercase = 0.05 lowercase = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): lowercase = 512 lowercase = 16 lowercase = 21 lowercase = """pascal-voc-id2label.json""" else: lowercase = 1000 lowercase = """imagenet-1k-id2label.json""" lowercase = """huggingface/label-files""" lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) ) lowercase = {int(lowercase_ ): v for k, v in idalabel.items()} lowercase = idalabel lowercase = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ): for i in range(1 , 6 ): if F"""layer_{i}.""" in name: lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: lowercase = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: lowercase = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: lowercase = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: lowercase = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: lowercase = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: lowercase = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." in name: lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: lowercase = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." 
in name: lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: lowercase = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: lowercase = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: lowercase = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: lowercase = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." not in name): lowercase = """mobilevit.""" + name return name def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ): if base_model: lowercase = """""" else: lowercase = """mobilevit.""" for key in orig_state_dict.copy().keys(): lowercase = orig_state_dict.pop(lowercase_ ) if key[:8] == "encoder.": lowercase = key[8:] if "qkv" in key: lowercase = key.split(""".""" ) lowercase = int(key_split[0][6:] ) - 1 lowercase = int(key_split[3] ) lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size lowercase = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: lowercase = val[:dim, :] lowercase = val[dim : dim * 2, :] lowercase = val[-dim:, :] else: lowercase = val[:dim] lowercase = val[dim : dim * 2] lowercase = val[-dim:] else: lowercase = val return orig_state_dict def SCREAMING_SNAKE_CASE ( ): lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ): lowercase = get_mobilevit_config(lowercase_ ) # load original state_dict lowercase = torch.load(lowercase_ , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval() else: lowercase = MobileViTForImageClassification(lowercase_ ).eval() lowercase = convert_state_dict(lowercase_ , lowercase_ ) model.load_state_dict(lowercase_ ) # Check outputs on an image, prepared by MobileViTImageProcessor lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowercase = model(**lowercase_ ) lowercase = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": lowercase = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, 
-5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": lowercase = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": lowercase = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase_ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowercase_ ) if push_to_hub: lowercase = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) lowercase = model_mapping[mobilevit_name] image_processor.push_to_hub(lowercase_ , organization="""apple""" ) model.push_to_hub(lowercase_ , organization="""apple""" ) if __name__ == "__main__": lowercase_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowercase_ : List[str] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
653
0
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset lowercase_ : Any = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __UpperCamelCase (nn.Module ): def __init__( self , _lowerCAmelCase ) -> Any: '''simple docstring''' super().__init__() lowercase = torchvision.models.resnetaaa(pretrained=__lowercase ) lowercase = list(model.children() )[:-2] lowercase = nn.Sequential(*__lowercase ) lowercase = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _a ( self , _lowerCAmelCase ) -> int: '''simple docstring''' lowercase = self.pool(self.model(__lowercase ) ) lowercase = torch.flatten(__lowercase , start_dim=2 ) lowercase = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class __UpperCamelCase (lowercase__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: '''simple docstring''' lowercase = [json.loads(__lowercase ) for l in open(__lowercase )] lowercase = os.path.dirname(__lowercase ) lowercase = tokenizer lowercase = labels lowercase = len(__lowercase ) lowercase = max_seq_length lowercase = transforms def __len__( self ) -> Any: '''simple docstring''' return len(self.data ) def __getitem__( self , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' lowercase = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=__lowercase ) ) lowercase , lowercase , lowercase = sentence[0], sentence[1:-1], sentence[-1] lowercase = sentence[: self.max_seq_length] lowercase = torch.zeros(self.n_classes ) lowercase = 1 lowercase = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) lowercase = self.transforms(__lowercase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _a ( self ) -> Dict: '''simple docstring''' lowercase = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): lowercase = [len(row["""sentence"""] ) for row in batch] lowercase , lowercase = len(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ ) lowercase = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.long ) lowercase = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): lowercase = input_row["""sentence"""] lowercase = 1 lowercase = torch.stack([row["""image"""] for row in batch] ) lowercase = torch.stack([row["""label"""] for row in batch] ) lowercase = torch.stack([row["""image_start_token"""] for row in batch] ) lowercase = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def SCREAMING_SNAKE_CASE ( ): return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def SCREAMING_SNAKE_CASE ( ): return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( 
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
715
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCamelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]: '''simple docstring''' lowercase = parent lowercase = batch_size lowercase = num_channels lowercase = is_training lowercase = use_labels lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = num_labels lowercase = image_size lowercase = layer_depths lowercase = embed_dims def _a ( self ) -> Tuple: '''simple docstring''' lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] , self.num_labels ) lowercase = self.get_config() return config, pixel_values, labels def _a ( self ) -> int: '''simple docstring''' return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = SwiftFormerModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = self.num_labels lowercase = SwiftFormerForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) lowercase = SwiftFormerForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self ) -> Optional[Any]: '''simple docstring''' ((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs() lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = (SwiftFormerModel, 
SwiftFormerForImageClassification) if is_torch_available() else () __A = ( {'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification} if is_torch_available() else {} ) __A = False __A = False __A = False __A = False __A = False def _a ( self ) -> Dict: '''simple docstring''' lowercase = SwiftFormerModelTester(self ) lowercase = ConfigTester( self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _a ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def _a ( self ) -> List[str]: '''simple docstring''' pass def _a ( self ) -> Dict: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCAmelCase ) lowercase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) ) def _a ( self ) -> int: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(_lowerCAmelCase ) lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase = [*signature.parameters.keys()] lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def _a ( self ) -> List[str]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def _a ( self ) -> Any: '''simple docstring''' for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def _a ( self ) -> Optional[Any]: '''simple docstring''' pass def _a ( self ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase = outputs.hidden_states lowercase = 8 self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(_lowerCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase ) def _a ( self ) -> Dict: '''simple docstring''' def _config_zero_init(_lowerCAmelCase ): lowercase = copy.deepcopy(_lowerCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 ) if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ): lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return configs_no_init lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = _config_zero_init(_lowerCAmelCase ) for model_class in self.all_model_classes: lowercase = model_class(config=_lowerCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a ( self ) -> Any: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( ): lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __UpperCamelCase (unittest.TestCase ): @cached_property def _a ( self ) -> List[str]: '''simple docstring''' return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase ) lowercase = self.default_image_processor lowercase = prepare_img() lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase = model(**_lowerCAmelCase ) # verify the logits lowercase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
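The hidden-state check in the test above encodes SwiftFormer's feature-map geometry: width and height start at image_size // 4 and halve after every two blocks, while the channel count follows embed_dims. A minimal sketch of that rule, assuming the tester defaults of image_size=224 and embed_dims=[48, 56, 112, 220]:

image_size, embed_dims = 224, [48, 56, 112, 220]
for i in range(8):
    side = (image_size // 4) // 2 ** (i // 2)
    print(i, (embed_dims[i // 2], side, side))  # ends at (220, 7, 7), matching the model test above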
653
0
'''simple docstring'''

import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowercase_ : List[str] = logging.get_logger(__name__)

lowercase_ : List[str] = {
    """nvidia/segformer-b0-finetuned-ade-512-512""": (
        """https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class __UpperCamelCase (PretrainedConfig ):
    __A = 'segformer'

    def __init__( self , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=[2, 2, 2, 2] , _lowerCAmelCase=[8, 4, 2, 1] , _lowerCAmelCase=[32, 64, 160, 256] , _lowerCAmelCase=[7, 3, 3, 3] , _lowerCAmelCase=[4, 2, 2, 2] , _lowerCAmelCase=[1, 2, 5, 8] , _lowerCAmelCase=[4, 4, 4, 4] , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.02 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-6 , _lowerCAmelCase=256 , _lowerCAmelCase=255 , **_lowerCAmelCase , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**_lowerCAmelCase )

        if "reshape_last_stage" in _lowerCAmelCase and _lowerCAmelCase["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )

        lowercase = num_channels
        lowercase = num_encoder_blocks
        lowercase = depths
        lowercase = sr_ratios
        lowercase = hidden_sizes
        lowercase = patch_sizes
        lowercase = strides
        lowercase = mlp_ratios
        lowercase = num_attention_heads
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = classifier_dropout_prob
        lowercase = initializer_range
        lowercase = drop_path_rate
        lowercase = layer_norm_eps
        lowercase = decoder_hidden_size
        lowercase = _lowerCAmelCase.get("""reshape_last_stage""" , True )
        lowercase = semantic_loss_ignore_index


class __UpperCamelCase (OnnxConfig ):
    __A = version.parse('''1.11''' )

    @property
    def _a ( self ) -> Optional[Any]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def _a ( self ) -> List[str]:
        '''simple docstring'''
        return 1E-4

    @property
    def _a ( self ) -> Optional[int]:
        '''simple docstring'''
        return 12
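A hedged usage sketch for the ONNX helper class above, assuming the public SegformerOnnxConfig under transformers.models.segformer.configuration_segformer (this sample is that class with obfuscated identifiers); the three properties map to the input axes, validation tolerance, and default opset:

from transformers import SegformerConfig
from transformers.models.segformer.configuration_segformer import SegformerOnnxConfig  # assumed import path

onnx_config = SegformerOnnxConfig(SegformerConfig())
print(onnx_config.inputs)               # OrderedDict([("pixel_values", {0: "batch", ...})])
print(onnx_config.atol_for_validation)  # 1e-04
print(onnx_config.default_onnx_opset)   # 12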
716
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def SCREAMING_SNAKE_CASE ( ): lowercase = HfArgumentParser(lowercase_ ) lowercase = parser.parse_args_into_dataclasses()[0] lowercase = TensorFlowBenchmark(args=lowercase_ ) try: lowercase = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] ) lowercase = """""" lowercase = eval(str(lowercase_ ).split(""" """ )[-1] ) lowercase = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(lowercase_ ) if len(lowercase_ ) > 0: lowercase = full_error_msg + begin_error_msg + str(lowercase_ ) raise ValueError(lowercase_ ) benchmark.run() if __name__ == "__main__": main()
653
0
'''simple docstring'''

from ... import PretrainedConfig


lowercase_ : int = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}


class __UpperCamelCase (PretrainedConfig ):
    __A = lowercase_
    __A = """nezha"""

    def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = hidden_act
        lowercase = intermediate_size
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = max_position_embeddings
        lowercase = max_relative_position
        lowercase = type_vocab_size
        lowercase = initializer_range
        lowercase = layer_norm_eps
        lowercase = classifier_dropout
        lowercase = use_cache
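A minimal usage sketch, assuming the public NezhaConfig from transformers (the checkpoint URL above points at sijunhe/nezha-cn-base, whose defaults these keyword values mirror):

from transformers import NezhaConfig  # assumes a transformers version that ships Nezha

config = NezhaConfig()
assert config.vocab_size == 21128          # first keyword default above
assert config.max_relative_position == 64  # the Nezha-specific relative-position window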
717
'''simple docstring'''

# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


lowercase_ : List[str] = '''3'''

print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())

try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)

try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
653
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


lowercase_ : str = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[Any] = ['''PLBartTokenizer''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Any = [
        '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PLBartForCausalLM''',
        '''PLBartForConditionalGeneration''',
        '''PLBartForSequenceClassification''',
        '''PLBartModel''',
        '''PLBartPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    lowercase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
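The _LazyModule pattern above keeps importing transformers.models.plbart cheap: submodules load only when an attribute is first touched. A small sketch of that behaviour, assuming an installed transformers:

import importlib

plbart = importlib.import_module("transformers.models.plbart")
print(type(plbart).__name__)   # the _LazyModule proxy, not a plain module type
cfg_cls = plbart.PLBartConfig  # first attribute access triggers the real import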
718
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : int = {'''vocab_file''': '''spm_char.model'''} lowercase_ : int = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } lowercase_ : Optional[Any] = { '''microsoft/speecht5_asr''': 1024, '''microsoft/speecht5_tts''': 1024, '''microsoft/speecht5_vc''': 1024, } class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: '''simple docstring''' lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) lowercase = vocab_file lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @property def _a ( self ) -> List[Any]: '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self ) -> str: '''simple docstring''' lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Union[str, Any]: '''simple docstring''' lowercase = self.__dict__.copy() lowercase = None return state def __setstate__( self , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' lowercase = self.sp_model.IdToPiece(_lowerCAmelCase ) return token def _a ( self , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = [] lowercase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowercase = [] else: current_sub_tokens.append(_lowerCAmelCase ) out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = 
None , _lowerCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) lowercase = [1] if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + suffix_ones return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , """wb""" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
653
0
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=128 , _lowerCAmelCase=32 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_input_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = num_choices lowercase = scope def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_input_mask: lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None if self.use_token_type_ids: lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase = None lowercase = None lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase = ids_tensor([self.batch_size] , self.num_choices ) lowercase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self ) -> str: '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) def _a ( self ) -> List[Any]: '''simple docstring''' ( ( lowercase ) , ( lowercase ) , ( 
lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) = self.prepare_config_and_inputs() lowercase = True lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: '''simple docstring''' lowercase = NezhaModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) lowercase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) lowercase = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> int: '''simple docstring''' lowercase = True lowercase = NezhaModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , ) lowercase = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , ) lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase = NezhaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = NezhaForNextSentencePrediction(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase = NezhaForPreTraining(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , ) self.parent.assertEqual(result.prediction_logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: '''simple docstring''' lowercase = NezhaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' lowercase = self.num_labels lowercase = NezhaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: '''simple docstring''' lowercase = self.num_labels lowercase = NezhaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: '''simple docstring''' lowercase = self.num_choices lowercase = NezhaForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _a ( self ) -> int: '''simple docstring''' lowercase = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) = config_and_inputs lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __UpperCamelCase (__a , __a , __a , unittest.TestCase ): __A = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) __A = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": 
NezhaForSequenceClassification, } if is_torch_available() else {} ) __A = True def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[int]: '''simple docstring''' lowercase = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class in get_values(lowerCAmelCase_ ): lowercase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_ ) lowercase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = NezhaModelTester(self ) lowercase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _a ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _a ( self ) -> Tuple: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase_ ) def _a ( self ) -> Dict: '''simple docstring''' ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase = None self.model_tester.create_and_check_model_as_decoder( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ ) def _a ( self ) -> str: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCAmelCase_ ) def _a ( self ) -> Tuple: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ ) def _a ( self ) -> int: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ ) def _a ( self ) -> str: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ ) def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ ) @slow def _a ( self ) -> Optional[Any]: '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = NezhaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @slow @require_torch_gpu def _a ( self ) -> List[str]: '''simple docstring''' lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return lowercase = True lowercase = model_class(config=lowerCAmelCase_ ) lowercase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) lowercase = torch.jit.trace( lowerCAmelCase_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , """bert.pt""" ) ) lowercase = torch.jit.load(os.path.join(lowerCAmelCase_ , """bert.pt""" ) , map_location=lowerCAmelCase_ ) loaded(inputs_dict["""input_ids"""].to(lowerCAmelCase_ ) , inputs_dict["""attention_mask"""].to(lowerCAmelCase_ ) ) @require_torch class __UpperCamelCase (unittest.TestCase ): @slow def _a ( self ) -> str: '''simple docstring''' lowercase = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] lowercase = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , lowerCAmelCase_ ) lowercase = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def _a ( self ) -> Tuple: '''simple docstring''' lowercase = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] lowercase = torch.Size((1, 6, 2_1128) ) self.assertEqual(output.shape , lowerCAmelCase_ ) lowercase = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) )
719
'''simple docstring'''

def SCREAMING_SNAKE_CASE ( ):
    lowercase = []
    i = 1
    while len(lowercase ) < 1E6:
        lowercase.append(str(i ) )
        i += 1
    lowercase = """""".join(lowercase )
    return (
        int(lowercase[0] )
        * int(lowercase[9] )
        * int(lowercase[99] )
        * int(lowercase[999] )
        * int(lowercase[9999] )
        * int(lowercase[9_9999] )
        * int(lowercase[99_9999] )
    )


if __name__ == "__main__":
    print(solution())
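The solution above materializes the first million digits of Champernowne's constant as one string. As a sketch of the same idea without building the string, the digit at a given 1-indexed position can be located arithmetically by walking the blocks of 1-digit, 2-digit, ... numbers (hypothetical helper, not part of the original):

def champernowne_digit(position):
    # Digit at 1-indexed `position` of 0.123456789101112...
    digits, count, start = 1, 9, 1
    while position > digits * count:  # skip whole blocks of equal-length numbers
        position -= digits * count
        digits += 1
        count *= 10
        start *= 10
    number = start + (position - 1) // digits
    return int(str(number)[(position - 1) % digits])

assert champernowne_digit(12) == 1  # ...9, 10, 11, 12... -> the '1' of 11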
653
0
'''simple docstring'''

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def SCREAMING_SNAKE_CASE ( *lowercase_ : Any ):
    with open(__file__ , """r""" ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*lowercase_ )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )


lowercase_ : Union[str, Any] = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
lowercase_ : Tuple = torch.device('''cuda''', local_rank)

lowercase_ : int = socket.gethostname()
lowercase_ : Tuple = f'''[{hostname}-{local_rank}]'''

try:
    # test distributed
    dist.init_process_group('''nccl''')
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    lowercase_ : Union[str, Any] = dist.get_rank()
    lowercase_ : str = dist.get_world_size()

    printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
    dist.barrier()

    if rank == 0:
        printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')

except Exception:
    printflock(f'''{gpu} is broken''')
    raise
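A hedged usage note: health-check scripts like the one above are meant to run under a distributed launcher, which is what populates LOCAL_RANK for every worker; the filename here is assumed:

#   torchrun --nproc_per_node=2 torch-distributed-gpu-test.py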
720
'''simple docstring'''

import os


def SCREAMING_SNAKE_CASE ( ):
    lowercase = os.path.join(os.path.dirname(__file__ ) , """num.txt""" )
    with open(lowercase ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]


if __name__ == "__main__":
    print(solution())
653
0
'''simple docstring'''

def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
    if not isinstance(lowercase_ , int ):
        lowercase = F"""Input value of [number={lowercase_}] must be an integer"""
        raise TypeError(lowercase )
    if lowercase_ < 1:
        lowercase = F"""Input value of [number={lowercase_}] must be > 0"""
        raise ValueError(lowercase )
    lowercase = 1
    for i in range(1 , lowercase_ ):
        lowercase *= 4 * i - 2
        lowercase //= i + 1
    return lowercase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
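A usage sketch for the function above: with this indexing the first five outputs are 1, 1, 2, 5, 14, so a call with n returns the (n-1)-th Catalan number of the usual 0-indexed sequence.

for n, expected in zip(range(1, 6), [1, 1, 2, 5, 14]):
    assert SCREAMING_SNAKE_CASE(n) == expected  # C(0)..C(4)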
721
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = StableDiffusionPanoramaPipeline __A = TEXT_TO_IMAGE_PARAMS __A = TEXT_TO_IMAGE_BATCH_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS def _a ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowercase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowercase = DDIMScheduler() torch.manual_seed(0 ) lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase = CLIPTextModel(_lowerCAmelCase ) lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, # Setting height and width to None to prevent OOMs on CPU. 
"""height""": None, """width""": None, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a ( self ) -> int: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _a ( self ) -> str: '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = """french fries""" lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Tuple: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Any: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Dict: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = PNDMScheduler( beta_start=0.0_0085 , 
beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ 0.3696_8392, 0.2702_5372, 0.3244_6766, 0.2837_9387, 0.3636_3274, 0.3073_3347, 0.2710_0027, 0.2705_4125, 0.2553_6096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def _a ( self ) -> str: '''simple docstring''' lowercase = StableDiffusionPanoramaPipeline.from_pretrained( """stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase ) lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _a ( self ) -> Any: '''simple docstring''' lowercase = 0 def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None: lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1868_1869, 0.3390_7816, 0.536_1276, 0.1443_2865, -0.0285_6611, -0.7394_1123, 0.2339_7987, 0.4732_2682, -0.3782_3164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1853_9645, 0.3398_7248, 0.537_8559, 0.1443_7142, -0.0245_5261, -0.733_8317, 0.2399_0755, 0.4735_6272, -0.378_6505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowercase = False lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = 
DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _a ( self ) -> int: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ) lowercase = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
653
0
'''simple docstring''' import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : Optional[Any]=False ): try: lowercase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase = default else: # KEY is set, convert it to True or False. try: lowercase = strtobool(lowercase_ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value lowercase_ : List[Any] = parse_flag_from_env('''RUN_SLOW''', default=False) def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): return unittest.skip("""Test was skipped""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ): return unittest.skipUnless(_run_slow_tests , """test is slow""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ): return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Any ): return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ): return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : int ): return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Any ): return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ): return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ): return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test 
requires torch version >= 1.12.0""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : int=None , lowercase_ : Tuple=None ): if test_case is None: return partial(lowercase_ , version=lowercase_ ) return unittest.skipUnless(is_torch_version(""">=""" , lowercase_ ) , F"""test requires torch version >= {version}""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ): return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(lowercase_ ) lowercase_ : int = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): return unittest.skipUnless( _atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(lowercase_ ) class __UpperCamelCase (unittest.TestCase ): __A = True @classmethod def _a ( cls ) -> List[Any]: '''simple docstring''' lowercase = tempfile.mkdtemp() @classmethod def _a ( cls ) -> int: '''simple docstring''' if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def _a ( self ) -> List[Any]: '''simple docstring''' if self.clear_on_setup: for path in Path(self.tmpdir ).glob("""**/*""" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A__ ) class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> int: '''simple docstring''' super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class __UpperCamelCase (unittest.TestCase ): def _a ( self , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = mocks if isinstance(A__ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ): lowercase = AcceleratorState() lowercase = tensor[None].clone().to(state.device ) lowercase = gather(lowercase_ ).cpu() lowercase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , lowercase_ ): return False return True class __UpperCamelCase : def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: '''simple docstring''' lowercase = returncode lowercase = stdout lowercase = stderr async def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : int ): while True: lowercase = await stream.readline() if line: callback(lowercase_ ) else: break async def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : Optional[Any]=None , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=False , lowercase_ : Optional[Any]=False ): if echo: print("""\nRunning: """ , """ """.join(lowercase_ ) ) lowercase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=lowercase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowercase_ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase = [] lowercase = [] def tee(lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : List[Any]="" ): lowercase = line.decode("""utf-8""" ).rstrip() sink.append(lowercase_ ) if not quiet: print(lowercase_ , lowercase_ , file=lowercase_ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda lowercase_ : tee(lowercase_ , lowercase_ , sys.stdout , label="""stdout:""" ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda lowercase_ : tee(lowercase_ , lowercase_ , sys.stderr , label="""stderr:""" ) ) ), ] , timeout=lowercase_ , ) return _RunOutput(await p.wait() , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : Dict=None , lowercase_ : List[Any]=None , lowercase_ : Tuple=180 , lowercase_ : Dict=False , lowercase_ : Optional[int]=True ): lowercase = asyncio.get_event_loop() lowercase = loop.run_until_complete( _stream_subprocess(lowercase_ , env=lowercase_ , stdin=lowercase_ , timeout=lowercase_ , quiet=lowercase_ , echo=lowercase_ ) ) lowercase = """ """.join(lowercase_ ) if result.returncode > 0: lowercase = """\n""".join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) return result class __UpperCamelCase (__a ): pass def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Union[str, Any]=False ): try: lowercase = subprocess.check_output(lowercase_ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(lowercase_ , """decode""" ): lowercase = output.decode("""utf-8""" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"""Command `{' '.join(lowercase_ )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
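The RUN_SLOW gate at the top of this file reduces to a small strtobool pattern; a self-contained sketch of the same semantics:

import os
from distutils.util import strtobool

os.environ["RUN_SLOW"] = "yes"  # any of yes/true/t/1 enables it
run_slow = bool(strtobool(os.environ.get("RUN_SLOW", "no")))
assert run_slow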
700
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeqaSeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    SeqaSeqDataCollator,
    SeqaSeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


lowercase_ : Tuple = logging.getLogger(__name__)


@dataclass
class __UpperCamelCase :
    __A = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    __A = field(
        default=_UpperCAmelCase ,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )


@dataclass
class __UpperCamelCase :
    __A = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    __A = field(
        default='''summarization''' ,
        metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
    __A = field(
        default=1024 ,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    __A = field(
        default=128 ,
        metadata={
            '''help''': (
                '''The maximum total sequence length for target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    __A = field(
        default=142 ,
        metadata={
            '''help''': (
                '''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded. '''
                '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
                '''during ``evaluate`` and ``predict``.'''
            )
        } , )
    __A = field(
        default=142 ,
        metadata={
            '''help''': (
                '''The maximum total sequence length for test target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    __A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
    __A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
    __A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
    __A = field(
        default=_UpperCAmelCase ,
        metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )


def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ):
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F""" {key} = {metrics[key]}""" )
    save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) )


def SCREAMING_SNAKE_CASE ( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()

    check_output_dir(lowercase_ )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,
        datefmt="""%m/%d/%Y %H:%M:%S""" ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,
        training_args.fpaa ,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("""Training/evaluation parameters %s""" , lowercase_ )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir , )

    lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(lowercase_ , lowercase_ , lowercase_ ):
            assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )

    lowercase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir , )
    lowercase = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=""".ckpt""" in model_args.model_name_or_path ,
        config=lowercase_ ,
        cache_dir=model_args.cache_dir , )

    # use task specific params
    use_task_specific_params(lowercase_ , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        lowercase = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(lowercase_ , lowercase_ ):
            lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(lowercase_ )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    lowercase = SeqaSeqDataset

    # Get datasets
    lowercase = (
        dataset_class(
            lowercase_ ,
            type_path="""train""" ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_train ,
            max_target_length=data_args.max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or """""" , )
        if training_args.do_train
        else None
    )
    lowercase = (
        dataset_class(
            lowercase_ ,
            type_path="""val""" ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_val ,
            max_target_length=data_args.val_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or """""" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    lowercase = (
        dataset_class(
            lowercase_ ,
            type_path="""test""" ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_test ,
            max_target_length=data_args.test_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or """""" , )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    lowercase = (
        build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
    )
    lowercase = SeqaSeqTrainer(
        model=lowercase_ ,
        args=lowercase_ ,
        data_args=lowercase_ ,
        train_dataset=lowercase_ ,
        eval_dataset=lowercase_ ,
        data_collator=SeqaSeqDataCollator(
            lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) ,
        compute_metrics=lowercase_ ,
        tokenizer=lowercase_ , )

    lowercase = {}
    # Training
    if training_args.do_train:
        logger.info("""*** Train ***""" )
        lowercase = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        lowercase = train_result.metrics
        lowercase = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("""train""" , lowercase_ , training_args.output_dir )
            all_metrics.update(lowercase_ )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        lowercase = trainer.evaluate(metric_key_prefix="""val""" )
        lowercase = data_args.n_val
        lowercase = round(metrics["""val_loss"""] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("""val""" , lowercase_ , training_args.output_dir )
            all_metrics.update(lowercase_ )

    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
        lowercase = test_output.metrics
        lowercase = data_args.n_test
        if trainer.is_world_process_zero():
            lowercase = round(metrics["""test_loss"""] , 4 )
            handle_metrics("""test""" , lowercase_ , training_args.output_dir )
            all_metrics.update(lowercase_ )

            if training_args.predict_with_generate:
                lowercase = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
                lowercase = lmap(str.strip , lowercase_ )
                write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )

    if trainer.is_world_process_zero():
        save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )

    return all_metrics


def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
653
0
'''simple docstring'''
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
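# A quick self-check of the function above (the inward scan assumes the input
# list is sorted ascending, which both calls below satisfy):
#
#     assert two_pointer([2, 7, 11, 15], 9) == [0, 1]  # 2 + 7 == 9
#     assert two_pointer([1, 2, 3], 7) == []           # no pair sums to 7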
701
'''simple docstring'''
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

lowercase_ : Union[str, Any] = logging.get_logger(__name__)


@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
    def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
        '''simple docstring'''
        super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
        '''simple docstring'''
        lowercase = {}
        lowercase = {}

        if prompt is not None:
            lowercase = prompt
        if generate_kwargs is not None:
            lowercase = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                lowercase = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            lowercase = max_new_tokens

        return preprocess_params, forward_kwargs, {}

    def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
        '''simple docstring'''
        return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
        '''simple docstring'''
        lowercase = load_image(_lowerCAmelCase )

        if prompt is not None:
            if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
                raise ValueError(
                    F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
                    """Note also that one single text can be provided for conditional image to text generation.""" )

            lowercase = self.model.config.model_type

            if model_type == "git":
                lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
                lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
                lowercase = [self.tokenizer.cls_token_id] + input_ids
                lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
                lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
                model_inputs.update(_lowerCAmelCase )
            else:
                raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
        else:
            lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )

        if self.model.config.model_type == "git" and prompt is None:
            lowercase = None

        return model_inputs

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            lowercase = None

        if generate_kwargs is None:
            lowercase = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        lowercase = model_inputs.pop(self.model.main_input_name )
        lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
        return model_outputs

    def _a ( self , _lowerCAmelCase ) -> List[str]:
        '''simple docstring'''
        lowercase = []
        for output_ids in model_outputs:
            lowercase = {
                """generated_text""": self.tokenizer.decode(
                    _lowerCAmelCase ,
                    skip_special_tokens=_lowerCAmelCase , )
            }
            records.append(_lowerCAmelCase )
        return records
653
0
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ : Dict = '''src/diffusers'''
lowercase_ : Dict = '''.'''

# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ : List[Any] = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ : Optional[Any] = spec.loader.load_module()


def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : Tuple ):
    return line.startswith(lowercase__ ) or len(lowercase__ ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , lowercase__ ) is not None


def SCREAMING_SNAKE_CASE ( lowercase_ : Any ):
    lowercase = object_name.split(""".""" )
    lowercase = 0

    # First let's find the module where our object lives.
    lowercase = parts[i]
    while i < len(lowercase__ ) and not os.path.isfile(os.path.join(lowercase__ , F"""{module}.py""" ) ):
        i += 1
        if i < len(lowercase__ ):
            lowercase = os.path.join(lowercase__ , parts[i] )
    if i >= len(lowercase__ ):
        raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )

    with open(os.path.join(lowercase__ , F"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowercase = f.readlines()

    # Now let's find the class / func in the code!
    lowercase = """"""
    lowercase = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lowercase__ )
            and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lowercase__ ):
        raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    lowercase = line_index
    while line_index < len(lowercase__ ) and _should_continue(lines[line_index] , lowercase__ ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1

    lowercase = lines[start_index:line_index]
    return "".join(lowercase__ )


lowercase_ : Optional[int] = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowercase_ : int = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowercase_ : List[str] = re.compile(r'''<FILL\s+[^>]*>''')


def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
    lowercase = code.split("""\n""" )
    lowercase = 0
    while idx < len(lowercase__ ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lowercase__ ):
        return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
    return ""


def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
    lowercase = len(get_indent(lowercase__ ) ) > 0
    if has_indent:
        lowercase = F"""class Bla:\n{code}"""
    lowercase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowercase__ )
    lowercase = black.format_str(lowercase__ , mode=lowercase__ )
    lowercase , lowercase = style_docstrings_in_code(lowercase__ )
    return result[len("""class Bla:\n""" ) :] if has_indent else result


def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int=False ):
    with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowercase = f.readlines()
    lowercase = []
    lowercase = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lowercase__ ):
        lowercase = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        lowercase , lowercase , lowercase = search.groups()
        lowercase = find_code_in_diffusers(lowercase__ )
        lowercase = get_indent(lowercase__ )

        lowercase = line_index + 1 if indent == theoretical_indent else line_index + 2
        lowercase = theoretical_indent
        lowercase = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        lowercase = True
        while line_index < len(lowercase__ ) and should_continue:
            line_index += 1
            if line_index >= len(lowercase__ ):
                break
            lowercase = lines[line_index]
            lowercase = _should_continue(lowercase__ , lowercase__ ) and re.search(F"""^{indent}# End copy""" , lowercase__ ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1

        lowercase = lines[start_index:line_index]
        lowercase = """""".join(lowercase__ )

        # Remove any nested `Copied from` comments to avoid circular copies
        lowercase = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(lowercase__ ) is None]
        lowercase = """\n""".join(lowercase__ )

        # Before comparing, use the `replace_pattern` on the original code.
        if len(lowercase__ ) > 0:
            lowercase = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            lowercase = [_re_replace_pattern.search(lowercase__ ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                lowercase , lowercase , lowercase = pattern.groups()
                lowercase = re.sub(lowercase__ , lowercase__ , lowercase__ )
                if option.strip() == "all-casing":
                    lowercase = re.sub(obja.lower() , obja.lower() , lowercase__ )
                    lowercase = re.sub(obja.upper() , obja.upper() , lowercase__ )

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            lowercase = blackify(lines[start_index - 1] + theoretical_code )
            lowercase = theoretical_code[len(lines[start_index - 1] ) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lowercase = lines[:start_index] + [theoretical_code] + lines[line_index:]
                lowercase = start_index + 1

    if overwrite and len(lowercase__ ) > 0:
        # Warn the user a file has been modified.
        print(F"""Detected changes, rewriting {filename}.""" )
        with open(lowercase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
            f.writelines(lowercase__ )

    return diffs


def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple = False ):
    lowercase = glob.glob(os.path.join(lowercase__ , """**/*.py""" ) , recursive=lowercase__ )
    lowercase = []
    for filename in all_files:
        lowercase = is_copy_consistent(lowercase__ , lowercase__ )
        diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(lowercase__ ) > 0:
        lowercase = """\n""".join(lowercase__ )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."""
        )


if __name__ == "__main__":
    lowercase_ : List[Any] = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    lowercase_ : str = parser.parse_args()

    check_copies(args.fix_and_overwrite)
702
'''simple docstring'''
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}


class __UpperCamelCase (_UpperCAmelCase ):
    __A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    __A = '''nezha'''

    def __init__(
        self ,
        _lowerCAmelCase=2_1128 ,
        _lowerCAmelCase=768 ,
        _lowerCAmelCase=12 ,
        _lowerCAmelCase=12 ,
        _lowerCAmelCase=3072 ,
        _lowerCAmelCase="gelu" ,
        _lowerCAmelCase=0.1 ,
        _lowerCAmelCase=0.1 ,
        _lowerCAmelCase=512 ,
        _lowerCAmelCase=64 ,
        _lowerCAmelCase=2 ,
        _lowerCAmelCase=0.02 ,
        _lowerCAmelCase=1E-12 ,
        _lowerCAmelCase=0.1 ,
        _lowerCAmelCase=0 ,
        _lowerCAmelCase=2 ,
        _lowerCAmelCase=3 ,
        _lowerCAmelCase=True ,
        **_lowerCAmelCase , ) -> int:
        '''simple docstring'''
        super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )

        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = hidden_act
        lowercase = intermediate_size
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = max_position_embeddings
        lowercase = max_relative_position
        lowercase = type_vocab_size
        lowercase = initializer_range
        lowercase = layer_norm_eps
        lowercase = classifier_dropout
        lowercase = use_cache
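# A hedged instantiation sketch. It assumes the upstream `transformers` package,
# where this configuration class is exported as `NezhaConfig`; the defaults mirror
# the signature above:
#
#     from transformers import NezhaConfig
#
#     config = NezhaConfig(vocab_size=21128, hidden_size=768)
#     print(config.max_relative_position)  # 64 by default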
653
0
'''simple docstring'''
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
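# A hedged usage note: calling the loader JIT-compiles the extension the first
# time, so a CUDA toolchain (nvcc) and a CUDA-enabled torch build are assumed:
#
#     MSDA = load_cuda_kernels()  # triggers the one-off ninja/nvcc build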
703
'''simple docstring'''
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)


def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
    lowercase = git.Repo(search_parent_directories=lowercase_ )
    lowercase = {
        """repo_id""": str(lowercase_ ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
    }
    with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
        json.dump(lowercase_ , lowercase_ , indent=4 )


def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
    if params.n_gpu <= 0:
        lowercase = 0
        lowercase = -1
        lowercase = True
        lowercase = False
        return

    assert torch.cuda.is_available()

    logger.info("""Initializing GPUs""" )
    if params.n_gpu > 1:
        assert params.local_rank != -1

        lowercase = int(os.environ["""WORLD_SIZE"""] )
        lowercase = int(os.environ["""N_GPU_NODE"""] )
        lowercase = int(os.environ["""RANK"""] )

        # number of nodes / node ID
        lowercase = params.world_size // params.n_gpu_per_node
        lowercase = params.global_rank // params.n_gpu_per_node
        lowercase = True

        assert params.n_nodes == int(os.environ["""N_NODES"""] )
        assert params.node_id == int(os.environ["""NODE_RANK"""] )

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        lowercase = 1
        lowercase = 0
        lowercase = 0
        lowercase = 0
        lowercase = 1
        lowercase = 1
        lowercase = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    lowercase = params.node_id == 0 and params.local_rank == 0
    lowercase = params.n_nodes > 1

    # summary
    lowercase = F"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
    logger.info(PREFIX + """Node ID : %i""" % params.node_id )
    logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
    logger.info(PREFIX + """World size : %i""" % params.world_size )
    logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
    logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
    logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
    logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
    logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )

    # set GPU device
    torch.cuda.set_device(params.local_rank )

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("""Initializing PyTorch distributed""" )
        torch.distributed.init_process_group(
            init_method="""env://""" ,
            backend="""nccl""" , )


def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed )
653
0
'''simple docstring'''
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm
from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
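# A hedged programmatic sketch of the packing helper above. The tokenizer name is
# illustrative and fetching it requires network access:
#
#     from transformers import AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("sshleifer/tiny-mbart")
#     src, tgt = pack_examples(tok, ["a b", "c d", "e"], ["x", "y", "z"], max_tokens=1024)
#     # short neighbours are merged until the token budget would be exceeded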
704
'''simple docstring'''
from __future__ import annotations

import os
from typing import Any

import requests


BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
653
0
import torch
from diffusers import StableDiffusionPipeline


model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
705
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


lowercase_ : Union[str, Any] = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')


def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ):
    lowercase = int(round(sample_rate * max_length ) )
    if len(lowercase_ ) <= sample_length:
        return wav
    lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]


@dataclass
class __UpperCamelCase :
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
    __A = field(
        default='''train''' ,
        metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    __A = field(
        default='''validation''' ,
        metadata={
            '''help''': (
                '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    __A = field(
        default='''audio''' ,
        metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
    __A = field(
        default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
    __A = field(
        default=_UpperCAmelCase ,
        metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    __A = field(
        default=_UpperCAmelCase ,
        metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    __A = field(
        default=20 ,
        metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )


@dataclass
class __UpperCamelCase :
    __A = field(
        default='''facebook/wav2vec2-base''' ,
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
    __A = field(
        default='''main''' ,
        metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
    __A = field(
        default=_UpperCAmelCase ,
        metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    __A = field(
        default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    __A = field(
        default=_UpperCAmelCase ,
        metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )

    def _a ( self ) -> List[Any]:
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder`"""
                """instead. Setting `freeze_feature_encoder==True`.""" ,
                _lowerCAmelCase , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`."""
                """Only make use of `--freeze_feature_encoder`.""" )


def SCREAMING_SNAKE_CASE ( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,
        datefmt="""%m/%d/%Y %H:%M:%S""" ,
        handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    lowercase = training_args.get_process_log_level()
    logger.setLevel(lowercase_ )
    transformers.utils.logging.set_verbosity(lowercase_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Detecting last checkpoint.
    lowercase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                """Use --overwrite_output_dir to train from scratch.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Initialize our dataset and prepare it for the audio classification task.
    lowercase = DatasetDict()
    lowercase = load_dataset(
        data_args.dataset_name ,
        data_args.dataset_config_name ,
        split=data_args.train_split_name ,
        use_auth_token=True if model_args.use_auth_token else None , )
    lowercase = load_dataset(
        data_args.dataset_name ,
        data_args.dataset_config_name ,
        split=data_args.eval_split_name ,
        use_auth_token=True if model_args.use_auth_token else None , )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
            """Make sure to set `--audio_column_name` to the correct audio column - one of """
            F"""{', '.join(raw_datasets['train'].column_names )}.""" )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
            """Make sure to set `--label_column_name` to the correct text column - one of """
            F"""{', '.join(raw_datasets['train'].column_names )}.""" )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    lowercase = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path ,
        return_attention_mask=model_args.attention_mask ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    lowercase = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )

    lowercase = feature_extractor.model_input_names[0]

    def train_transforms(lowercase_ : int ):
        lowercase = []
        for audio in batch[data_args.audio_column_name]:
            lowercase = random_subsample(
                audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(lowercase_ )
        lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
        lowercase = {model_input_name: inputs.get(lowercase_ )}
        lowercase = list(batch[data_args.label_column_name] )
        return output_batch

    def val_transforms(lowercase_ : Dict ):
        lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
        lowercase = {model_input_name: inputs.get(lowercase_ )}
        lowercase = list(batch[data_args.label_column_name] )
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
    lowercase , lowercase = {}, {}
    for i, label in enumerate(lowercase_ ):
        lowercase = str(lowercase_ )
        lowercase = label

    # Load the accuracy metric from the datasets package
    lowercase = evaluate.load("""accuracy""" )

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(lowercase_ : Tuple ):
        lowercase = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )

    lowercase = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path ,
        num_labels=len(lowercase_ ) ,
        labelaid=lowercase_ ,
        idalabel=lowercase_ ,
        finetuning_task="""audio-classification""" ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )
    lowercase = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,
        config=lowercase_ ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None ,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            lowercase = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            lowercase = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )

    # Initialize our trainer
    lowercase = Trainer(
        model=lowercase_ ,
        args=lowercase_ ,
        train_dataset=raw_datasets["""train"""] if training_args.do_train else None ,
        eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None ,
        compute_metrics=lowercase_ ,
        tokenizer=lowercase_ , )

    # Training
    if training_args.do_train:
        lowercase = None
        if training_args.resume_from_checkpoint is not None:
            lowercase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowercase = last_checkpoint
        lowercase = trainer.train(resume_from_checkpoint=lowercase_ )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        lowercase = trainer.evaluate()
        trainer.log_metrics("""eval""" , lowercase_ )
        trainer.save_metrics("""eval""" , lowercase_ )

    # Write model card and (optionally) push to hub
    lowercase = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """audio-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""audio-classification"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowercase_ )
    else:
        trainer.create_model_card(**lowercase_ )


if __name__ == "__main__":
    main()
653
0
'''simple docstring'''


def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permutea(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
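# A hedged cross-check that the two implementations above agree up to ordering,
# using the standard library as the reference:
#
#     from itertools import permutations as reference
#
#     expected = sorted(list(p) for p in reference([1, 2, 3]))
#     assert sorted(permute([1, 2, 3])) == expected
#     assert sorted(permutea([1, 2, 3])) == expected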
706
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


lowercase_ : Union[str, Any] = logging.get_logger(__name__)


@dataclass
class __UpperCamelCase (_UpperCAmelCase ):
    __A = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]

    def __init__( self , **_lowerCAmelCase ) -> Optional[int]:
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                lowercase = deprecated_arg[3:]
                lowercase = not kwargs.pop(_lowerCAmelCase )
                logger.warning(
                    F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
        lowercase = kwargs.pop("""tpu_name""" , self.tpu_name )
        lowercase = kwargs.pop("""device_idx""" , self.device_idx )
        lowercase = kwargs.pop("""eager_mode""" , self.eager_mode )
        lowercase = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**_lowerCAmelCase )

    __A = field(
        default=_UpperCAmelCase ,
        metadata={'''help''': '''Name of TPU'''} , )
    __A = field(
        default=0 ,
        metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} )
    __A = field(
        default=_UpperCAmelCase ,
        metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
        } , )

    @cached_property
    def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        lowercase = None
        if self.tpu:
            try:
                if self.tpu_name:
                    lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                lowercase = None
        return tpu

    @cached_property
    def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )

            lowercase = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , """GPU""" )  # disable GPU
                lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
        return strategy

    @property
    def _a ( self ) -> bool:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        return self._setup_tpu is not None

    @property
    def _a ( self ) -> "tf.distribute.Strategy":
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        return self._setup_strategy

    @property
    def _a ( self ) -> Tuple:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        return tf.config.list_physical_devices("""GPU""" )

    @property
    def _a ( self ) -> int:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def _a ( self ) -> bool:
        '''simple docstring'''
        return self.n_gpu > 0
653
0
'''simple docstring'''
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class __UpperCamelCase (_UpperCamelCase ):
    __A = "EncodecFeatureExtractor"
    __A = ("T5Tokenizer", "T5TokenizerFast")

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(__a , __a )
        lowercase = self.feature_extractor
        lowercase = False

    def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True ) -> Dict:
        '''simple docstring'''
        return self.tokenizer.get_decoder_prompt_ids(task=__a , language=__a , no_timestamps=__a )

    def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*__a , **__a )

        lowercase = kwargs.pop("""audio""" , __a )
        lowercase = kwargs.pop("""sampling_rate""" , __a )
        lowercase = kwargs.pop("""text""" , __a )
        if len(__a ) > 0:
            lowercase = args[0]
            lowercase = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )

        if text is not None:
            lowercase = self.tokenizer(__a , **__a )
        if audio is not None:
            lowercase = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a )

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            lowercase = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                lowercase = audio_inputs["padding_mask"]
            return inputs

    def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
        '''simple docstring'''
        lowercase = kwargs.pop("""audio""" , __a )
        lowercase = kwargs.pop("""padding_mask""" , __a )

        if len(__a ) > 0:
            lowercase = args[0]
            lowercase = args[1:]

        if audio_values is not None:
            return self._decode_audio(__a , padding_mask=__a )
        else:
            return self.tokenizer.batch_decode(*__a , **__a )

    def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
        '''simple docstring'''
        return self.tokenizer.decode(*__a , **__a )

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[np.ndarray]:
        '''simple docstring'''
        lowercase = to_numpy(__a )
        lowercase = audio_values.shape

        if padding_mask is None:
            return list(__a )

        lowercase = to_numpy(__a )

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowercase = seq_len - padding_mask.shape[-1]
        lowercase = 1 - self.feature_extractor.padding_value
        lowercase = np.pad(__a , ((0, 0), (0, difference)) , """constant""" , constant_values=__a )

        lowercase = audio_values.tolist()
        for i in range(__a ):
            lowercase = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowercase = sliced_audio.reshape(__a , -1 )

        return audio_values
707
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ : Any = logging.get_logger(__name__)

lowercase_ : str = {
    '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class __UpperCamelCase (_UpperCAmelCase ):
    __A = '''vit_msn'''

    def __init__(
        self ,
        _lowerCAmelCase=768 ,
        _lowerCAmelCase=12 ,
        _lowerCAmelCase=12 ,
        _lowerCAmelCase=3072 ,
        _lowerCAmelCase="gelu" ,
        _lowerCAmelCase=0.0 ,
        _lowerCAmelCase=0.0 ,
        _lowerCAmelCase=0.02 ,
        _lowerCAmelCase=1E-06 ,
        _lowerCAmelCase=224 ,
        _lowerCAmelCase=16 ,
        _lowerCAmelCase=3 ,
        _lowerCAmelCase=True ,
        **_lowerCAmelCase , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**_lowerCAmelCase )

        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = initializer_range
        lowercase = layer_norm_eps
        lowercase = image_size
        lowercase = patch_size
        lowercase = num_channels
        lowercase = qkv_bias
653
0
'''simple docstring'''
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class __UpperCamelCase (UpperCAmelCase_ ):
    __A = (DDPMScheduler,)

    def _a ( self , **_lowerCAmelCase ) -> int:
        '''simple docstring'''
        lowercase = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**_snake_case )
        return config

    def _a ( self ) -> List[Any]:
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_snake_case )

    def _a ( self ) -> Tuple:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )

    def _a ( self ) -> Dict:
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_snake_case )

    def _a ( self ) -> List[Any]:
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=_snake_case )

    def _a ( self ) -> int:
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_snake_case )

    def _a ( self ) -> Any:
        '''simple docstring'''
        self.check_over_configs(thresholding=_snake_case )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=_snake_case ,
                    prediction_type=_snake_case ,
                    sample_max_value=_snake_case , )

    def _a ( self ) -> Any:
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=_snake_case )

    def _a ( self ) -> Dict:
        '''simple docstring'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=_snake_case )

    def _a ( self ) -> Optional[Any]:
        '''simple docstring'''
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**_snake_case )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def _a ( self ) -> Dict:
        '''simple docstring'''
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**_snake_case )

        lowercase = len(_snake_case )

        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        lowercase = torch.manual_seed(0 )

        for t in reversed(range(_snake_case ) ):
            # 1. predict noise residual
            lowercase = model(_snake_case , _snake_case )

            # 2. predict previous mean of sample x_t-1
            lowercase = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowercase = pred_prev_sample

        lowercase = torch.sum(torch.abs(_snake_case ) )
        lowercase = torch.mean(torch.abs(_snake_case ) )

        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def _a ( self ) -> Dict:
        '''simple docstring'''
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(prediction_type="""v_prediction""" )
        lowercase = scheduler_class(**_snake_case )

        lowercase = len(_snake_case )

        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        lowercase = torch.manual_seed(0 )

        for t in reversed(range(_snake_case ) ):
            # 1. predict noise residual
            lowercase = model(_snake_case , _snake_case )

            # 2. predict previous mean of sample x_t-1
            lowercase = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowercase = pred_prev_sample

        lowercase = torch.sum(torch.abs(_snake_case ) )
        lowercase = torch.mean(torch.abs(_snake_case ) )

        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3

    def _a ( self ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**_snake_case )

        lowercase = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=_snake_case )

        lowercase = scheduler.timesteps

        for i, timestep in enumerate(_snake_case ):
            if i == len(_snake_case ) - 1:
                lowercase = -1
            else:
                lowercase = timesteps[i + 1]

            lowercase = scheduler.previous_timestep(_snake_case )
            lowercase = prev_t.item()

            self.assertEqual(_snake_case , _snake_case )

    def _a ( self ) -> Optional[Any]:
        '''simple docstring'''
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**_snake_case )

        lowercase = [100, 87, 50, 51, 0]

        with self.assertRaises(_snake_case , msg="""`custom_timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=_snake_case )

    def _a ( self ) -> Tuple:
        '''simple docstring'''
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**_snake_case )

        lowercase = [100, 87, 50, 1, 0]
        lowercase = len(_snake_case )

        with self.assertRaises(_snake_case , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=_snake_case , timesteps=_snake_case )

    def _a ( self ) -> List[str]:
        '''simple docstring'''
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**_snake_case )

        lowercase = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            _snake_case ,
            msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=_snake_case )
708
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : str ):
    lowercase = """"""
    for i in table:
        res += inp[i - 1]
    return res


def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
    return data[1:] + data[0]


def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
    lowercase = """"""
    for i in range(len(lowercase_ ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
    lowercase = int("""0b""" + data[0] + data[-1] , 2 )
    lowercase = int("""0b""" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]


def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any ):
    lowercase = message[:4]
    lowercase = message[4:]
    lowercase = apply_table(lowercase_ , lowercase_ )
    lowercase = xor(lowercase_ , lowercase_ )
    lowercase = apply_sbox(lowercase_ , temp[:4] )  # noqa: E741
    lowercase = apply_sbox(lowercase_ , temp[4:] )
    lowercase = """0""" * (2 - len(lowercase_ )) + l  # noqa: E741
    lowercase = """0""" * (2 - len(lowercase_ )) + r
    lowercase = apply_table(l + r , lowercase_ )
    lowercase = xor(lowercase_ , lowercase_ )
    return temp + right


if __name__ == "__main__":
    lowercase_ : Tuple = input('''Enter 10 bit key: ''')
    lowercase_ : Any = input('''Enter 8 bit message: ''')

    lowercase_ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
    lowercase_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    lowercase_ : List[Any] = [2, 4, 3, 1]
    lowercase_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
    lowercase_ : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
    lowercase_ : Optional[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
    lowercase_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    lowercase_ : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    lowercase_ : Union[str, Any] = apply_table(key, paa_table)
    lowercase_ : Optional[Any] = temp[:5]
    lowercase_ : int = temp[5:]
    lowercase_ : List[str] = left_shift(left)
    lowercase_ : int = left_shift(right)
    lowercase_ : Tuple = apply_table(left + right, pa_table)
    lowercase_ : List[str] = left_shift(left)
    lowercase_ : Optional[Any] = left_shift(right)
    lowercase_ : Union[str, Any] = left_shift(left)
    lowercase_ : Union[str, Any] = left_shift(right)
    lowercase_ : Optional[int] = apply_table(left + right, pa_table)

    # encryption
    lowercase_ : int = apply_table(message, IP)
    lowercase_ : Dict = function(expansion, sa, sa, keya, temp)
    lowercase_ : Any = temp[4:] + temp[:4]
    lowercase_ : List[Any] = function(expansion, sa, sa, keya, temp)
    lowercase_ : Tuple = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)

    # decryption
    lowercase_ : List[str] = apply_table(CT, IP)
    lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
    lowercase_ : Optional[Any] = temp[4:] + temp[:4]
    lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
    lowercase_ : Optional[Any] = apply_table(temp, IP_inv)
    print('''Plain text after decypting is:''', PT)
653
0
from __future__ import annotations

from scipy.special import comb  # type: ignore


class __UpperCamelCase :
    def __init__( self , _lowerCAmelCase ) -> Any:
        '''simple docstring'''
        lowercase = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        lowercase = len(_lowerCAmelCase ) - 1

    def _a ( self , _lowerCAmelCase ) -> list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        lowercase = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(_lowerCAmelCase ) , 5 ) == 1
        return output_values

    def _a ( self , _lowerCAmelCase ) -> tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        lowercase = self.basis_function(_lowerCAmelCase )
        lowercase = 0.0
        lowercase = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def _a ( self , _lowerCAmelCase = 0.01 ) -> Union[str, Any]:
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore

        lowercase = []  # x coordinates of points to plot
        lowercase = []  # y coordinates of points to plot

        lowercase = 0.0
        while t <= 1:
            lowercase = self.bezier_curve_function(_lowerCAmelCase )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size

        lowercase = [i[0] for i in self.list_of_points]
        lowercase = [i[1] for i in self.list_of_points]

        plt.plot(
            _lowerCAmelCase , _lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
709
'''simple docstring'''
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


lowercase_ : int = 50_0000
lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__)
lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))


@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ):
    lowercase = dataset.map(**lowercase_ )


@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ):
    lowercase = dataset.filter(**lowercase_ )


def SCREAMING_SNAKE_CASE ( ):
    lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}

    with tempfile.TemporaryDirectory() as tmp_dir:
        lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )

        lowercase = generate_example_dataset(
            os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )

        lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )

        def tokenize(lowercase_ : Dict ):
            return tokenizer(examples["""text"""] )

        lowercase = map(lowercase_ )
        lowercase = map(lowercase_ , batched=lowercase_ )
        lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        with dataset.formatted_as(type="""numpy""" ):
            lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        with dataset.formatted_as(type="""pandas""" ):
            lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )

        lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
        lowercase = filter(lowercase_ )

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(lowercase_ , """wb""" ) as f:
        f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
653
0
'''simple docstring'''
from __future__ import annotations

import inspect
import unittest
from math import floor

import numpy as np

from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFCvtForImageClassification, TFCvtModel
    from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class __UpperCamelCase (UpperCamelCase__ ):
    def _a ( self ) -> int:
        '''simple docstring'''
        lowercase = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(_a , """embed_dim""" ) )
        self.parent.assertTrue(hasattr(_a , """num_heads""" ) )


class __UpperCamelCase :
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=64 , _lowerCAmelCase=3 , _lowerCAmelCase=[16, 48, 96] , _lowerCAmelCase=[1, 3, 6] , _lowerCAmelCase=[1, 2, 10] , _lowerCAmelCase=[7, 3, 3] , _lowerCAmelCase=[4, 2, 2] , _lowerCAmelCase=[2, 1, 1] , _lowerCAmelCase=[2, 2, 2] , _lowerCAmelCase=[False, False, True] , _lowerCAmelCase=[0.0, 0.0, 0.0] , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=2 , ) -> Optional[int]:
        '''simple docstring'''
        lowercase = parent
        lowercase = batch_size
        lowercase = image_size
        lowercase = patch_sizes
        lowercase = patch_stride
        lowercase = patch_padding
        lowercase = is_training
        lowercase = use_labels
        lowercase = num_labels
        lowercase = num_channels
        lowercase = embed_dim
        lowercase = num_heads
        lowercase = stride_kv
        lowercase = depth
        lowercase = cls_token
        lowercase = attention_drop_rate
        lowercase = initializer_range
        lowercase = layer_norm_eps

    def _a ( self ) -> Dict:
        '''simple docstring'''
        lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowercase = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            lowercase = ids_tensor([self.batch_size] , self.num_labels )

        lowercase = self.get_config()
        return config, pixel_values, labels

    def _a ( self ) -> List[Any]:
        '''simple docstring'''
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        '''simple docstring'''
        lowercase = TFCvtModel(config=_a )
        lowercase = model(_a , training=_a )
        lowercase = (self.image_size, self.image_size)
        lowercase = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            lowercase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            lowercase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
        '''simple docstring'''
        lowercase = self.num_labels
        lowercase = TFCvtForImageClassification(_a )
        lowercase = model(_a , labels=_a , training=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _a ( self ) -> Tuple:
        '''simple docstring'''
        lowercase = self.prepare_config_and_inputs()
        lowercase = config_and_inputs
        lowercase = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_tf
class __UpperCamelCase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    __A = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    __A = (
        {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    __A = False
    __A = False
    __A = False
    __A = False
    __A = False

    def _a ( self ) -> List[Any]:
        '''simple docstring'''
        lowercase = TFCvtModelTester(self )
        lowercase = TFCvtConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )

    def _a ( self ) -> str:
        '''simple docstring'''
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="""Cvt does not output attentions""" )
    def _a ( self ) -> List[str]:
        '''simple docstring'''
        pass

    @unittest.skip(reason="""Cvt does not use inputs_embeds""" )
    def _a ( self ) -> List[Any]:
        '''simple docstring'''
        pass

    @unittest.skip(reason="""Cvt does not support input and output embeddings""" )
    def _a ( self ) -> str:
        '''simple docstring'''
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,
        reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    def _a ( self ) -> Union[str, Any]:
        '''simple docstring'''
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,
        reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    @slow
    def _a ( self ) -> List[str]:
        '''simple docstring'''
        super().test_keras_fit()

    @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
    def _a ( self ) -> int:
        '''simple docstring'''
        lowercase = tf.keras.mixed_precision.Policy("""mixed_float16""" )
        tf.keras.mixed_precision.set_global_policy(_a )
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("""float32""" )

    def _a ( self ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase = model_class(_a )
            lowercase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase = [*signature.parameters.keys()]

            lowercase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _a )

    def _a ( self ) -> Optional[Any]:
        '''simple docstring'''

        def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
            lowercase = model_class(_a )
            lowercase = model(**self._prepare_for_class(_a , _a ) )

            lowercase = outputs.hidden_states

            lowercase = len(self.model_tester.depth )
            self.assertEqual(len(_a ) , _a )

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase = True
            check_hidden_states_output(_a , _a , _a )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase = True

            check_hidden_states_output(_a , _a , _a )

    def _a ( self ) -> Dict:
        '''simple docstring'''
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )

    def _a ( self ) -> Tuple:
        '''simple docstring'''
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_a )

    @slow
    def _a ( self ) -> Any:
        '''simple docstring'''
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = TFCvtModel.from_pretrained(_a )
            self.assertIsNotNone(_a )


def SCREAMING_SNAKE_CASE ( ):
    lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_tf
@require_vision
class __UpperCamelCase (unittest.TestCase ):
    @cached_property
    def _a ( self ) -> int:
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def _a ( self ) -> Tuple:
        '''simple docstring'''
        lowercase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

        lowercase = self.default_image_processor
        lowercase = prepare_img()
        lowercase = image_processor(images=_a , return_tensors="""tf""" )

        # forward pass
        lowercase = model(**_a )

        # verify the logits
        lowercase = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _a )

        lowercase = tf.constant([0.9285, 0.9015, -0.3150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _a , atol=1E-4 ) )
710
'''simple docstring'''
from random import shuffle

import tensorflow as tf
from numpy import array


def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
    lowercase = int(lowercase_ )
    assert noofclusters < len(lowercase_ )

    # Find out the dimensionality
    lowercase = len(vectors[0] )

    # Will help select random centroids from among the available vectors
    lowercase = list(range(len(lowercase_ ) ) )
    shuffle(lowercase_ )

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    lowercase = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        lowercase = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        lowercase = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        lowercase = tf.placeholder("""float64""" , [dim] )
        lowercase = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        lowercase = tf.placeholder("""int32""" )
        lowercase = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        lowercase = tf.placeholder("""float""" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        lowercase = tf.reduce_mean(lowercase_ , 0 )

        ##Node for computing Euclidean distances
        # Placeholders for input
        lowercase = tf.placeholder("""float""" , [dim] )
        lowercase = tf.placeholder("""float""" , [dim] )
        lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) )

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        lowercase = tf.placeholder("""float""" , [noofclusters] )
        lowercase = tf.argmin(lowercase_ , 0 )

        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        lowercase = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(lowercase_ )

        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        lowercase = 100
        for _ in range(lowercase_ ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(lowercase_ ) ):
                lowercase = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
                # cluster assignment node.
                lowercase = [
                    sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                lowercase = sess.run(
                    lowercase_ , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(lowercase_ ):
                # Collect all the vectors assigned to this cluster
                lowercase = [
                    vectors[i]
                    for i in range(len(lowercase_ ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                lowercase = sess.run(
                    lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )

        # Return centroids and assignments
        lowercase = sess.run(lowercase_ )
        lowercase = sess.run(lowercase_ )
        return centroids, assignments
653
0
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


lowercase_ : int = logging.get_logger(__name__)

lowercase_ : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

lowercase_ : Dict = {
    'vocab_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
        ),
    },
    'tokenizer_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
        'roberta-base-openai-detector': (
            'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
        ),
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
        ),
    },
}

lowercase_ : Any = {
    'roberta-base': 512,
    'roberta-large': 512,
    'roberta-large-mnli': 512,
    'distilroberta-base': 512,
    'roberta-base-openai-detector': 512,
    'roberta-large-openai-detector': 512,
}


class __UpperCamelCase (SCREAMING_SNAKE_CASE_ ):
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    __A = RobertaTokenizer

    def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="replace" , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(
            UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )

        lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
            lowercase = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
            lowercase = add_prefix_space
            lowercase = pre_tok_class(**UpperCamelCase__ )

        lowercase = add_prefix_space

        lowercase = '''post_processor'''
        lowercase = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
        if tokenizer_component_instance:
            lowercase = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase = tuple(state["""sep"""] )
            if "cls" in state:
                lowercase = tuple(state["""cls"""] )

            lowercase = False

            if state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
                lowercase = add_prefix_space
                lowercase = True

            if state.get("""trim_offsets""" , UpperCamelCase__ ) != trim_offsets:
                lowercase = trim_offsets
                lowercase = True

            if changes_to_apply:
                lowercase = getattr(UpperCamelCase__ , state.pop("""type""" ) )
                lowercase = component_class(**UpperCamelCase__ )
                setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )

    @property
    def _a ( self ) -> Any:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def _a ( self , _lowerCAmelCase ) -> List[str]:
        '''simple docstring'''
        lowercase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value
        lowercase = value

    def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
        '''simple docstring'''
        lowercase = kwargs.get("""is_split_into_words""" , UpperCamelCase__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )

    def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
        '''simple docstring'''
        lowercase = kwargs.get("""is_split_into_words""" , UpperCamelCase__ )

        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
        return tuple(UpperCamelCase__ )

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Any:
        '''simple docstring'''
        lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Any:
        '''simple docstring'''
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
711
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
    def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        lowercase = update_area_of_max_square(lowercase_ , col + 1 )
        lowercase = update_area_of_max_square(row + 1 , col + 1 )
        lowercase = update_area_of_max_square(row + 1 , lowercase_ )

        if mat[row][col]:
            lowercase = 1 + min([right, diagonal, down] )
            lowercase = max(largest_square_area[0] , lowercase_ )
            return sub_problem_sol
        else:
            return 0

    lowercase = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]


def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
    def update_area_of_max_square_using_dp_array(
        lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
        lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
        lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )

        if mat[row][col]:
            lowercase = 1 + min([right, diagonal, down] )
            lowercase = max(largest_square_area[0] , lowercase_ )
            lowercase = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    lowercase = [0]
    lowercase = [[-1] * cols for _ in range(lowercase_ )]
    update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )

    return largest_square_area[0]


def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
    lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
    lowercase = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            lowercase = dp_array[row][col + 1]
            lowercase = dp_array[row + 1][col + 1]
            lowercase = dp_array[row + 1][col]

            if mat[row][col] == 1:
                lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
                lowercase = max(dp_array[row][col] , lowercase_ )
            else:
                lowercase = 0

    return largest_square_area


def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
    lowercase = [0] * (cols + 1)
    lowercase = [0] * (cols + 1)
    lowercase = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            lowercase = current_row[col + 1]
            lowercase = next_row[col + 1]
            lowercase = next_row[col]

            if mat[row][col] == 1:
                lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
                lowercase = max(current_row[col] , lowercase_ )
            else:
                lowercase = 0
        lowercase = current_row

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
653
0
'''simple docstring'''
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


lowercase_ : Union[str, Any] = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''


def SCREAMING_SNAKE_CASE ( ):
    lowercase = _ask_options(
        """In which compute environment are you running?""" ,
        ["""This machine""", """AWS (Amazon SageMaker)"""] ,
        _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        lowercase = get_sagemaker_input()
    else:
        lowercase = get_cluster_input()
    return config


def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any]=None ):
    if subparsers is not None:
        lowercase = subparsers.add_parser("""config""" , description=lowerCamelCase_ )
    else:
        lowercase = argparse.ArgumentParser("""Accelerate config command""" , description=lowerCamelCase_ )

    parser.add_argument(
        """--config_file""" ,
        default=lowerCamelCase_ ,
        help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
            """such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
            """with \'huggingface\'."""
        ) , )

    if subparsers is not None:
        parser.set_defaults(func=lowerCamelCase_ )
    return parser


def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
    lowercase = get_user_input()
    if args.config_file is not None:
        lowercase = args.config_file
    else:
        if not os.path.isdir(lowerCamelCase_ ):
            os.makedirs(lowerCamelCase_ )
        lowercase = default_yaml_config_file

    if config_file.endswith(""".json""" ):
        config.to_json_file(lowerCamelCase_ )
    else:
        config.to_yaml_file(lowerCamelCase_ )
    print(F"""accelerate configuration saved at {config_file}""" )


def SCREAMING_SNAKE_CASE ( ):
    lowercase = config_command_parser()
    lowercase = parser.parse_args()
    config_command(lowerCamelCase_ )


if __name__ == "__main__":
    main()
712
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ : Optional[Any] = logging.get_logger(__name__)

lowercase_ : int = {
    '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}


class __UpperCamelCase (_UpperCAmelCase ):
    __A = '''gpt_bigcode'''
    __A = ['''past_key_values''']
    __A = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
        '''simple docstring'''
        lowercase = vocab_size
        lowercase = n_positions
        lowercase = n_embd
        lowercase = n_layer
        lowercase = n_head
        lowercase = n_inner
        lowercase = activation_function
        lowercase = resid_pdrop
        lowercase = embd_pdrop
        lowercase = attn_pdrop
        lowercase = layer_norm_epsilon
        lowercase = initializer_range
        lowercase = scale_attn_weights
        lowercase = use_cache
        lowercase = attention_softmax_in_fpaa
        lowercase = scale_attention_softmax_in_fpaa
        lowercase = multi_query

        lowercase = bos_token_id
        lowercase = eos_token_id

        super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
653
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowercase_ : Optional[int] = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : str = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    lowercase_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
713
'''simple docstring'''
import requests


def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
    lowercase = {"""Content-Type""": """application/json"""}
    lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
    if response.status_code != 200:
        lowercase = (
            """Request to slack returned an error """
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(lowercase_ )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
653
0
'''simple docstring'''
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class __UpperCamelCase (__lowercase ):
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> List[str]:
        '''simple docstring'''
        super().__init__(
            features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , )
        lowercase = Generator(
            cache_dir=__A , features=__A , generator=__A , gen_kwargs=__A , **__A , )

    def _a ( self ) -> List[str]:
        '''simple docstring'''
        if self.streaming:
            lowercase = self.builder.as_streaming_dataset(split="""train""" )
        # Build regular (map-style) dataset
        else:
            lowercase = None
            lowercase = None
            lowercase = None
            lowercase = None

            self.builder.download_and_prepare(
                download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , )
            lowercase = self.builder.as_dataset(
                split="""train""" , verification_mode=__A , in_memory=self.keep_in_memory )

        return dataset
714
'''simple docstring'''
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)


def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
    lowercase = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        lowercase = [144, 192, 240]
        lowercase = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        lowercase = [96, 120, 144]
        lowercase = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        lowercase = [64, 80, 96]
        lowercase = [16, 16, 24, 48, 64, 80, 320]
        lowercase = 0.05
        lowercase = 2.0

    if mobilevit_name.startswith("""deeplabv3_""" ):
        lowercase = 512
        lowercase = 16
        lowercase = 21
        lowercase = """pascal-voc-id2label.json"""
    else:
        lowercase = 1000
        lowercase = """imagenet-1k-id2label.json"""

    lowercase = """huggingface/label-files"""
    lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
    lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
    lowercase = idalabel
    lowercase = {v: k for k, v in idalabel.items()}

    return config


def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
    for i in range(1 , 6 ):
        if F"""layer_{i}.""" in name:
            lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )

    if "conv_1." in name:
        lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
    if ".block." in name:
        lowercase = name.replace(""".block.""" , """.""" )
    if "exp_1x1" in name:
        lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
    if "red_1x1" in name:
        lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
    if ".local_rep.conv_3x3." in name:
        lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
    if ".local_rep.conv_1x1." in name:
        lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
    if ".norm." in name:
        lowercase = name.replace(""".norm.""" , """.normalization.""" )
    if ".conv." in name:
        lowercase = name.replace(""".conv.""" , """.convolution.""" )
    if ".conv_proj." in name:
        lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )

    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )

    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
                if "expand_1x1" in name:
                    lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
                if "conv_3x3" in name:
                    lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
                if "reduce_1x1" in name:
                    lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )

    for i in range(2 , 5 ):
        if F""".global_rep.{i}.weight""" in name:
            lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
        if F""".global_rep.{i}.bias""" in name:
            lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )

    if ".global_rep." in name:
        lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
    if ".pre_norm_mha.0." in name:
        lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
    if ".pre_norm_mha.1.out_proj." in name:
        lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
    if ".pre_norm_ffn.0." in name:
        lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
    if ".pre_norm_ffn.1." in name:
        lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
    if ".pre_norm_ffn.4." in name:
        lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
    if ".transformer." in name:
        lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )

    if ".aspp_layer." in name:
        lowercase = name.replace(""".aspp_layer.""" , """.""" )
    if ".aspp_pool." in name:
        lowercase = name.replace(""".aspp_pool.""" , """.""" )
    if "seg_head." in name:
        lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
    if "segmentation_head.classifier.classifier." in name:
        lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )

    if "classifier.fc." in name:
        lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
    elif (not base_model) and ("segmentation_head." not in name):
        lowercase = """mobilevit.""" + name

    return name


def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
    if base_model:
        lowercase = """"""
    else:
        lowercase = """mobilevit."""

    for key in orig_state_dict.copy().keys():
        lowercase = orig_state_dict.pop(lowercase_ )
        if key[:8] == "encoder.":
            lowercase = key[8:]

        if "qkv" in key:
            lowercase = key.split(""".""" )
            lowercase = int(key_split[0][6:] ) - 1
            lowercase = int(key_split[3] )
            lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            lowercase = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                lowercase = val[:dim, :]
                lowercase = val[dim : dim * 2, :]
                lowercase = val[-dim:, :]
            else:
                lowercase = val[:dim]
                lowercase = val[dim : dim * 2]
                lowercase = val[-dim:]
        else:
            lowercase = val

    return orig_state_dict


def SCREAMING_SNAKE_CASE ( ):
    lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
    return im


@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
    lowercase = get_mobilevit_config(lowercase_ )

    # load original state_dict
    lowercase = torch.load(lowercase_ , map_location="""cpu""" )

    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
    else:
        lowercase = MobileViTForImageClassification(lowercase_ ).eval()

    lowercase = convert_state_dict(lowercase_ , lowercase_ )
    model.load_state_dict(lowercase_ )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
    lowercase = model(**lowercase_ )
    lowercase = outputs.logits

    if mobilevit_name.startswith("""deeplabv3_""" ):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            lowercase = torch.tensor(
                [
                    [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
                    [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
                    [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            lowercase = torch.tensor(
                [
                    [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
                    [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
                    [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            lowercase = torch.tensor(
                [
                    [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
                    [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
                    [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
                ] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )

        assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
        elif mobilevit_name == "mobilevit_xs":
            lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
        elif mobilevit_name == "mobilevit_xxs":
            lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )

        assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )

    Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowercase_ )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(lowercase_ )

    if push_to_hub:
        lowercase = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }

        print("""Pushing to the hub...""" )
        lowercase = model_mapping[mobilevit_name]
        image_processor.push_to_hub(lowercase_ , organization="""apple""" )
        model.push_to_hub(lowercase_ , organization="""apple""" )


if __name__ == "__main__":
    lowercase_ : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--mobilevit_name''',
        default='''mobilevit_s''',
        type=str,
        help=(
            '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
            ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    lowercase_ : List[str] = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
653
0
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=UpperCamelCase__ )
class __UpperCamelCase (UpperCamelCase__ ):
    __A = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    __A = Features({'''image''': Image()} )
    __A = Features({'''labels''': ClassLabel} )
    __A = '''image'''
    __A = '''labels'''

    def _a ( self , _lowerCAmelCase ) -> Tuple:
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , __A ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )

        lowercase = copy.deepcopy(self )
        lowercase = self.label_schema.copy()
        lowercase = features[self.label_column]
        lowercase = label_schema
        return task_template

    @property
    def _a ( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
715
'''simple docstring'''
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class __UpperCamelCase :
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
        '''simple docstring'''
        lowercase = parent
        lowercase = batch_size
        lowercase = num_channels
        lowercase = is_training
        lowercase = use_labels
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = num_labels
        lowercase = image_size
        lowercase = layer_depths
        lowercase = embed_dims

    def _a ( self ) -> Tuple:
        '''simple docstring'''
        lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size] , self.num_labels )

        lowercase = self.get_config()
        return config, pixel_values, labels

    def _a ( self ) -> int:
        '''simple docstring'''
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        '''simple docstring'''
        lowercase = SwiftFormerModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        lowercase = model(_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )

    def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        '''simple docstring'''
        lowercase = self.num_labels
        lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()

        lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase = model(_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _a ( self ) -> Optional[Any]:
        '''simple docstring'''
        ((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
        lowercase = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    __A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    __A = (
        {'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    __A = False
    __A = False
    __A = False
    __A = False
    __A = False

    def _a ( self ) -> Dict:
        '''simple docstring'''
        lowercase = SwiftFormerModelTester(self )
        lowercase = ConfigTester(
            self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )

    def _a ( self ) -> List[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
    def _a ( self ) -> List[str]:
        '''simple docstring'''
        pass

    def _a ( self ) -> Dict:
        '''simple docstring'''
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase = model_class(_lowerCAmelCase )
            lowercase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )

    def _a ( self ) -> int:
        '''simple docstring'''
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase = model_class(_lowerCAmelCase )
            lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase = [*signature.parameters.keys()]

            lowercase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowerCAmelCase )

    def _a ( self ) -> List[str]:
        '''simple docstring'''
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )

    def _a ( self ) -> Optional[Any]:
        '''simple docstring'''
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )

    @slow
    def _a ( self ) -> Any:
        '''simple docstring'''
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )

    @unittest.skip(reason="""SwiftFormer does not output attentions""" )
    def _a ( self ) -> Optional[Any]:
        '''simple docstring'''
        pass

    def _a ( self ) -> Union[str, Any]:
        '''simple docstring'''

        def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
            lowercase = model_class(_lowerCAmelCase )
            model.to(_lowerCAmelCase )
            model.eval()

            with torch.no_grad():
                lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )

            lowercase = outputs.hidden_states

            lowercase = 8
            self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(_lowerCAmelCase ) ):
                self.assertEqual(
                    hidden_states[i].shape ,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )

        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase = True
            check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase = True

            check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

    def _a ( self ) -> Dict:
        '''simple docstring'''

        def _config_zero_init(_lowerCAmelCase ):
            lowercase = copy.deepcopy(_lowerCAmelCase )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
                if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
                    lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
                    setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            return configs_no_init

        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        lowercase = _config_zero_init(_lowerCAmelCase )
        for model_class in self.all_model_classes:
            lowercase = model_class(config=_lowerCAmelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9) / 1E9).round().item() ,
                        [0.0, 1.0] ,
                        msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def _a ( self ) -> Any:
        '''simple docstring'''
        pass


def SCREAMING_SNAKE_CASE ( ):
    lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
    @cached_property
    def _a ( self ) -> List[str]:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None

    @slow
    def _a ( self ) -> List[Any]:
        '''simple docstring'''
        lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
        lowercase = self.default_image_processor

        lowercase = prepare_img()
        lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )

        # forward pass
        with torch.no_grad():
            lowercase = model(**_lowerCAmelCase )

        # verify the logits
        lowercase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowerCAmelCase )

        lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
653
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available lowercase_ : int = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Optional[Any] = ['''BartphoTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys lowercase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
716
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def SCREAMING_SNAKE_CASE ( ): lowercase = HfArgumentParser(lowercase_ ) lowercase = parser.parse_args_into_dataclasses()[0] lowercase = TensorFlowBenchmark(args=lowercase_ ) try: lowercase = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] ) lowercase = """""" lowercase = eval(str(lowercase_ ).split(""" """ )[-1] ) lowercase = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(lowercase_ ) if len(lowercase_ ) > 0: lowercase = full_error_msg + begin_error_msg + str(lowercase_ ) raise ValueError(lowercase_ ) benchmark.run() if __name__ == "__main__": main()
653
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor lowercase_ : str = logging.get_logger(__name__) class __UpperCamelCase (_UpperCAmelCase ): def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> None: '''simple docstring''' warnings.warn( """The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use PerceiverImageProcessor instead.""" , _lowerCAmelCase , ) super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
717
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys lowercase_ : List[str] = '''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
653
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ : List[Any] = logging.get_logger(__name__) lowercase_ : List[str] = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class __UpperCamelCase (_UpperCAmelCase ): __A = '''dpr''' def __init__( self , _lowerCAmelCase=3_0522 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0 , _lowerCAmelCase="absolute" , _lowerCAmelCase = 0 , **_lowerCAmelCase , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = hidden_act lowercase = intermediate_size lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = initializer_range lowercase = layer_norm_eps lowercase = projection_dim lowercase = position_embedding_type
718
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : int = {'''vocab_file''': '''spm_char.model'''} lowercase_ : int = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } lowercase_ : Optional[Any] = { '''microsoft/speecht5_asr''': 1024, '''microsoft/speecht5_tts''': 1024, '''microsoft/speecht5_vc''': 1024, } class __UpperCamelCase (_UpperCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: '''simple docstring''' lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) lowercase = vocab_file lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @property def _a ( self ) -> List[Any]: '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self ) -> str: '''simple docstring''' lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Union[str, Any]: '''simple docstring''' lowercase = self.__dict__.copy() lowercase = None return state def __setstate__( self , _lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(_lowerCAmelCase ) def _a ( self , _lowerCAmelCase ) -> str: '''simple docstring''' lowercase = self.sp_model.IdToPiece(_lowerCAmelCase ) return token def _a ( self , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' lowercase = [] lowercase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowercase = [] else: current_sub_tokens.append(_lowerCAmelCase ) out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self , _lowerCAmelCase , _lowerCAmelCase = 
None , _lowerCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) lowercase = [1] if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + suffix_ones return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , """wb""" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
653
0
'''simple docstring''' from collections import deque def SCREAMING_SNAKE_CASE ( lowercase_ : int ): lowercase = len(lowercase_ ) lowercase = deque() lowercase = [False for _ in range(lowercase_ )] lowercase = [-1 for _ in range(lowercase_ )] lowercase = index_of[:] def strong_connect(lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Dict ): lowercase = index # the number when this node is seen lowercase = index # lowest rank node reachable from here index += 1 stack.append(lowercase_ ) lowercase = True for w in g[v]: if index_of[w] == -1: lowercase = strong_connect(lowercase_ , lowercase_ , lowercase_ ) lowercase = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: lowercase = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: lowercase = [] lowercase = stack.pop() lowercase = False component.append(lowercase_ ) while w != v: lowercase = stack.pop() lowercase = False component.append(lowercase_ ) components.append(lowercase_ ) return index lowercase = [] for v in range(lowercase_ ): if index_of[v] == -1: strong_connect(lowercase_ , 0 , lowercase_ ) return components def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : List[Any] ): lowercase = [[] for _ in range(lowercase_ )] for u, v in edges: g[u].append(lowercase_ ) return g if __name__ == "__main__": # Test lowercase_ : Any = 7 lowercase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6] lowercase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5] lowercase_ : Dict = [(u, v) for u, v in zip(source, target)] lowercase_ : Optional[int] = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
719
'''simple docstring''' def SCREAMING_SNAKE_CASE ( ): lowercase = [] lowercase = 1 while len(lowercase_ ) < 1E6: constant.append(str(lowercase_ ) ) i += 1 lowercase = """""".join(lowercase_ ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9999] ) * int(constant[9_9999] ) * int(constant[99_9999] ) ) if __name__ == "__main__": print(solution())
653
0
'''simple docstring''' import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class __UpperCamelCase (_UpperCAmelCase ): __A = '''MCTCTFeatureExtractor''' __A = '''AutoTokenizer''' def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' super().__init__(_lowerCAmelCase , _lowerCAmelCase ) lowercase = self.feature_extractor lowercase = False def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*_lowerCAmelCase , **_lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" ) lowercase = kwargs.pop("""raw_speech""" ) else: lowercase = kwargs.pop("""audio""" , _lowerCAmelCase ) lowercase = kwargs.pop("""sampling_rate""" , _lowerCAmelCase ) lowercase = kwargs.pop("""text""" , _lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: lowercase = args[0] lowercase = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: lowercase = self.feature_extractor(_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None: lowercase = self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: lowercase = encodings["""input_ids"""] return inputs def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: '''simple docstring''' return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*_lowerCAmelCase , **_lowerCAmelCase ) lowercase = kwargs.pop("""input_features""" , _lowerCAmelCase ) lowercase = kwargs.pop("""labels""" , _lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: lowercase = args[0] lowercase = args[1:] if input_features is not None: lowercase = self.feature_extractor.pad(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) if labels is not None: lowercase = self.tokenizer.pad(_lowerCAmelCase , **_lowerCAmelCase ) if labels is None: return input_features elif input_features is None: return labels else: lowercase = labels["""input_ids"""] return input_features def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @contextmanager def _a ( self ) -> List[Any]: '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) lowercase = True lowercase = self.tokenizer yield lowercase = self.feature_extractor lowercase = False
720
'''simple docstring''' import os def SCREAMING_SNAKE_CASE ( ): lowercase = os.path.join(os.path.dirname(lowercase_ ) , """num.txt""" ) with open(lowercase_ ) as file_hand: return str(sum(int(lowercase_ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
653
0
'''simple docstring''' def SCREAMING_SNAKE_CASE ( lowercase_ : int ): lowercase = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def SCREAMING_SNAKE_CASE ( lowercase_ : int = 5000 ): lowercase = [(i * (3 * i - 1)) // 2 for i in range(1 , lowercase_ )] for i, pentagonal_i in enumerate(lowercase_ ): for j in range(lowercase_ , len(lowercase_ ) ): lowercase = pentagonal_nums[j] lowercase = pentagonal_i + pentagonal_j lowercase = pentagonal_j - pentagonal_i if is_pentagonal(lowercase_ ) and is_pentagonal(lowercase_ ): return b return -1 if __name__ == "__main__": print(f'''{solution() = }''')
721
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): __A = StableDiffusionPanoramaPipeline __A = TEXT_TO_IMAGE_PARAMS __A = TEXT_TO_IMAGE_BATCH_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS __A = TEXT_TO_IMAGE_IMAGE_PARAMS def _a ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowercase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowercase = DDIMScheduler() torch.manual_seed(0 ) lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase = CLIPTextModel(_lowerCAmelCase ) lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, # Setting height and width to None to prevent OOMs on CPU. 
"""height""": None, """width""": None, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a ( self ) -> int: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _a ( self ) -> str: '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def _a ( self ) -> List[Any]: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = """french fries""" lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Tuple: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 ) lowercase = output.images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Any: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> Dict: '''simple docstring''' lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase = self.get_dummy_components() lowercase = PNDMScheduler( beta_start=0.0_0085 , 
beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase ) lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase ) lowercase = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase = self.get_dummy_inputs(_lowerCAmelCase ) lowercase = sd_pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __UpperCamelCase (unittest.TestCase ): def _a ( self ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]: '''simple docstring''' lowercase = torch.manual_seed(_lowerCAmelCase ) lowercase = { """prompt""": """a photo of the dolomites""", """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ 0.3696_8392, 0.2702_5372, 0.3244_6766, 0.2837_9387, 0.3636_3274, 0.3073_3347, 0.2710_0027, 0.2705_4125, 0.2553_6096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def _a ( self ) -> str: '''simple docstring''' lowercase = StableDiffusionPanoramaPipeline.from_pretrained( """stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase ) lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ).images lowercase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _a ( self ) -> Any: '''simple docstring''' lowercase = 0 def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None: lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1868_1869, 0.3390_7816, 0.536_1276, 0.1443_2865, -0.0285_6611, -0.7394_1123, 0.2339_7987, 0.4732_2682, -0.3782_3164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowercase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase = latents[0, -3:, -3:, -1] lowercase = np.array( [ 0.1853_9645, 0.3398_7248, 0.537_8559, 0.1443_7142, -0.0245_5261, -0.733_8317, 0.2399_0755, 0.4735_6272, -0.378_6505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowercase = False lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = 
DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() lowercase = self.get_inputs() pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _a ( self ) -> int: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase = """stabilityai/stable-diffusion-2-base""" lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase ) lowercase = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase = self.get_inputs() lowercase = pipe(**_lowerCAmelCase ) lowercase = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
653
0
'''simple docstring''' from math import factorial lowercase_ : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def SCREAMING_SNAKE_CASE ( lowercase_ : int ): if not isinstance(lowercase_ , lowercase_ ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(lowercase_ ) ) def SCREAMING_SNAKE_CASE ( lowercase_ : int = 60 , lowercase_ : int = 100_0000 ): if not isinstance(lowercase_ , lowercase_ ) or not isinstance(lowercase_ , lowercase_ ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length lowercase = 0 # the cached sizes of the previous chains lowercase = {} for start_chain_element in range(1 , lowercase_ ): # The temporary set will contain the elements of the chain lowercase = set() lowercase = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. lowercase = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(lowercase_ ) chain_set_length += 1 lowercase = digit_factorial_sum(lowercase_ ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] lowercase = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f'''{solution()}''')
700
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) lowercase_ : Tuple = logging.getLogger(__name__) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class __UpperCamelCase : __A = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) __A = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) __A = field( default=1024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field( default=128 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) __A = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) __A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} ) __A = field(default=-1 , metadata={'''help''': '''# test examples. 
-1 means use all.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} ) __A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , ) def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ): logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() check_output_dir(lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowercase_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(lowercase_ , lowercase_ , lowercase_ ): assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) ) lowercase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowercase_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: lowercase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase_ , lowercase_ ): lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) lowercase = SeqaSeqDataset # Get datasets lowercase = ( dataset_class( lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) lowercase = ( dataset_class( lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) lowercase = ( dataset_class( lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer lowercase = ( build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None ) lowercase = SeqaSeqTrainer( model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator( lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) lowercase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) lowercase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) lowercase = train_result.metrics lowercase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , lowercase_ , 
training_args.output_dir ) all_metrics.update(lowercase_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowercase = trainer.evaluate(metric_key_prefix="""val""" ) lowercase = data_args.n_val lowercase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" ) lowercase = test_output.metrics lowercase = data_args.n_test if trainer.is_world_process_zero(): lowercase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.predict_with_generate: lowercase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) lowercase = lmap(str.strip , lowercase_ ) write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
653
0
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowercase_ : List[Any] = logging.get_logger(__name__) @dataclass class __UpperCamelCase (snake_case_ ): __A = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **_lowerCAmelCase ) -> str: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase = deprecated_arg[3:] lowercase = not kwargs.pop(_lowerCAmelCase ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) lowercase = kwargs.pop("""tpu_name""" , self.tpu_name ) lowercase = kwargs.pop("""device_idx""" , self.device_idx ) lowercase = kwargs.pop("""eager_mode""" , self.eager_mode ) lowercase = kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**_lowerCAmelCase ) __A = field( default=snake_case_ , metadata={'''help''': '''Name of TPU'''} , ) __A = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) __A = field(default=snake_case_ , metadata={'''help''': '''Benchmark models in eager model.'''} ) __A = field( default=snake_case_ , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _a ( self ) -> Tuple: '''simple docstring''' requires_backends(self , ["""tf"""] ) lowercase = None if self.tpu: try: if self.tpu_name: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: lowercase = None return tpu @cached_property def _a ( self ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) lowercase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" ) return strategy @property def _a ( self ) -> int: '''simple docstring''' requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self ) -> List[str]: '''simple docstring''' requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self ) -> Any: '''simple docstring''' requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self ) -> Optional[int]: '''simple docstring''' return self.n_gpu > 0
701
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowercase_ : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class __UpperCamelCase (_UpperCAmelCase ): def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: '''simple docstring''' super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str: '''simple docstring''' lowercase = {} lowercase = {} if prompt is not None: lowercase = prompt if generate_kwargs is not None: lowercase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowercase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any: '''simple docstring''' return super().__call__(_lowerCAmelCase , **_lowerCAmelCase ) def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]: '''simple docstring''' lowercase = load_image(_lowerCAmelCase ) if prompt is not None: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise ValueError( F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. 
""" """Note also that one single text can be provided for conditional image to text generation.""" ) lowercase = self.model.config.model_type if model_type == "git": lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids lowercase = [self.tokenizer.cls_token_id] + input_ids lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(_lowerCAmelCase ) else: raise ValueError(F"""Model type {model_type} does not support conditional text generation""" ) else: lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase = None return model_inputs def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]: '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowercase = None if generate_kwargs is None: lowercase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase = model_inputs.pop(self.model.main_input_name ) lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase ) return model_outputs def _a ( self , _lowerCAmelCase ) -> List[str]: '''simple docstring''' lowercase = [] for output_ids in model_outputs: lowercase = { """generated_text""": self.tokenizer.decode( _lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , ) } records.append(_lowerCAmelCase ) return records
653
0
from __future__ import annotations import math def SCREAMING_SNAKE_CASE ( lowercase_ : float , lowercase_ : int ): lowercase = u for i in range(1 , lowercase_ ): lowercase = temp * (u - i) return temp def SCREAMING_SNAKE_CASE ( ): lowercase = int(input("""enter the numbers of values: """ ) ) lowercase = [] for _ in range(lowercase_ ): y.append([] ) for i in range(lowercase_ ): for j in range(lowercase_ ): y[i].append(lowercase_ ) lowercase = 0 print("""enter the values of parameters in a list: """ ) lowercase = list(map(lowercase_ , input().split() ) ) print("""enter the values of corresponding parameters: """ ) for i in range(lowercase_ ): lowercase = float(input() ) lowercase = int(input("""enter the value to interpolate: """ ) ) lowercase = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , lowercase_ ): for j in range(n - i ): lowercase = y[j + 1][i - 1] - y[j][i - 1] lowercase = y[0][0] for i in range(1 , lowercase_ ): summ += (ucal(lowercase_ , lowercase_ ) * y[0][i]) / math.factorial(lowercase_ ) print(F"""the value at {value} is {summ}""" ) if __name__ == "__main__": main()
702
'''simple docstring''' from ...configuration_utils import PretrainedConfig lowercase_ : int = { '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''', } class __UpperCamelCase (_UpperCAmelCase ): __A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP __A = '''nezha''' def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int: '''simple docstring''' super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase ) lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = hidden_act lowercase = intermediate_size lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = max_relative_position lowercase = type_vocab_size lowercase = initializer_range lowercase = layer_norm_eps lowercase = classifier_dropout lowercase = use_cache
653
0
'''simple docstring''' from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ : int = logging.get_logger(__name__) lowercase_ : str = { '''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''', } class __UpperCamelCase (_UpperCAmelCase ): __A = '''autoformer''' __A = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "student_t" , _lowerCAmelCase = "nll" , _lowerCAmelCase = 1 , _lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7] , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 64 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 32 , _lowerCAmelCase = 32 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 100 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = True , _lowerCAmelCase=True , _lowerCAmelCase = 10 , _lowerCAmelCase = 25 , _lowerCAmelCase = 3 , **_lowerCAmelCase , ) -> str: '''simple docstring''' lowercase = prediction_length lowercase = context_length if context_length is not None else prediction_length lowercase = distribution_output lowercase = loss lowercase = input_size lowercase = num_time_features lowercase = lags_sequence lowercase = scaling lowercase = num_dynamic_real_features lowercase = num_static_real_features lowercase = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(_lowerCAmelCase ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) lowercase = cardinality else: lowercase = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(_lowerCAmelCase ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) lowercase = embedding_dimension else: lowercase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowercase = num_parallel_samples # Transformer architecture configuration lowercase = input_size * len(self.lags_sequence ) + self._number_of_features lowercase = d_model lowercase = encoder_attention_heads lowercase = decoder_attention_heads lowercase = encoder_ffn_dim lowercase = decoder_ffn_dim lowercase = encoder_layers lowercase = decoder_layers lowercase = dropout lowercase = attention_dropout lowercase = activation_dropout lowercase = encoder_layerdrop lowercase = decoder_layerdrop lowercase = activation_function lowercase = init_std lowercase = use_cache # Autoformer lowercase = label_length lowercase = moving_average lowercase = autocorrelation_factor super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase ) @property def _a ( self ) -> Dict: '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
703
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) lowercase_ : Tuple = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): lowercase = git.Repo(search_parent_directories=lowercase_ ) lowercase = { """repo_id""": str(lowercase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), } with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f: json.dump(lowercase_ , lowercase_ , indent=4 ) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): if params.n_gpu <= 0: lowercase = 0 lowercase = -1 lowercase = True lowercase = False return assert torch.cuda.is_available() logger.info("""Initializing GPUs""" ) if params.n_gpu > 1: assert params.local_rank != -1 lowercase = int(os.environ["""WORLD_SIZE"""] ) lowercase = int(os.environ["""N_GPU_NODE"""] ) lowercase = int(os.environ["""RANK"""] ) # number of nodes / node ID lowercase = params.world_size // params.n_gpu_per_node lowercase = params.global_rank // params.n_gpu_per_node lowercase = True assert params.n_nodes == int(os.environ["""N_NODES"""] ) assert params.node_id == int(os.environ["""NODE_RANK"""] ) # local job (single GPU) else: assert params.local_rank == -1 lowercase = 1 lowercase = 0 lowercase = 0 lowercase = 0 lowercase = 1 lowercase = 1 lowercase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowercase = params.node_id == 0 and params.local_rank == 0 lowercase = params.n_nodes > 1 # summary lowercase = F"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes ) logger.info(PREFIX + """Node ID : %i""" % params.node_id ) logger.info(PREFIX + """Local rank : %i""" % params.local_rank ) logger.info(PREFIX + """World size : %i""" % params.world_size ) logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node ) logger.info(PREFIX + """Master : %s""" % str(params.is_master ) ) logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) ) logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) ) logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("""Initializing PyTorch distributed""" ) torch.distributed.init_process_group( init_method="""env://""" , backend="""nccl""" , ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
653
0
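For a quick smoke test of the configuration row above, the following sketch instantiates it through the public `transformers` API. This is illustrative only: it assumes a `transformers` release that ships `AutoformerConfig` (4.30+), that the derived size is exposed as `feature_size` as in the upstream config, and every parameter value is made up for the example.

# Minimal sketch: instantiating the configuration from the `code` field above.
# Assumes transformers>=4.30; all values are illustrative, not recommendations.
from transformers import AutoformerConfig

config = AutoformerConfig(
    prediction_length=24,      # forecast horizon in time steps
    context_length=48,         # window the encoder conditions on
    lags_sequence=[1, 2, 3],   # lagged copies of the input appended per step
    num_time_features=2,
)

# feature_size mirrors the arithmetic in the row: input_size * len(lags_sequence)
# plus the embedding/time/real-feature count from `_number_of_features`.
print(config.feature_size)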
"""Simulated annealing: a randomized hill-climbing variant that can escape local optima."""
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob: SearchProblem,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperature: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperature
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
704
"""Fetch information about the authenticated GitHub user via the REST API."""
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Query the /user endpoint with a personal access token and return the JSON payload."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
653
0
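The core of the simulated-annealing row above is the Metropolis acceptance rule: an improving move is always taken, while a worsening one is accepted with probability e^(change/T), which shrinks as the temperature cools. A self-contained sketch of just that rule (the temperatures and deltas below are illustrative, not taken from the row):

# Metropolis acceptance rule, extracted from the annealing loop above.
import math
import random


def accept(change: float, current_temp: float) -> bool:
    # Improving moves always pass; worsening moves pass with probability
    # e**(change / T), so acceptance of bad moves decays as T drops.
    if change > 0:
        return True
    return random.random() < math.e ** (change / current_temp)


# Illustrative values: a worsening move of -1 is accepted often while hot...
print(sum(accept(-1.0, 100.0) for _ in range(1000)))  # roughly 990 of 1000
# ...and essentially never once the schedule has cooled.
print(sum(accept(-1.0, 0.1) for _ in range(1000)))    # roughly 0 of 1000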
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
705
'''simple docstring''' import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ : Union[str, Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ): lowercase = int(round(sample_rate * max_length ) ) if len(lowercase_ ) <= sample_length: return wav lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class __UpperCamelCase : __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} ) __A = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) __A = field( default='''validation''' , metadata={ '''help''': ( '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) __A = field( default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , ) __A = field( default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. 
Defaults to \'label\''''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) __A = field( default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , ) @dataclass class __UpperCamelCase : __A = field( default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} ) __A = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} ) __A = field( default=_UpperCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) __A = field( default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def _a ( self ) -> List[Any]: '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase = training_args.get_process_log_level() logger.setLevel(lowercase_ ) transformers.utils.logging.set_verbosity(lowercase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. lowercase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. lowercase = DatasetDict() lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ """Make sure to set `--audio_column_name` to the correct audio column - one of """ F"""{', '.join(raw_datasets['train'].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. 
""" """Make sure to set `--label_column_name` to the correct text column - one of """ F"""{', '.join(raw_datasets['train'].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy lowercase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. lowercase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) lowercase = feature_extractor.model_input_names[0] def train_transforms(lowercase_ : int ): lowercase = [] for audio in batch[data_args.audio_column_name]: lowercase = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(lowercase_ ) lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate ) lowercase = {model_input_name: inputs.get(lowercase_ )} lowercase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(lowercase_ : Dict ): lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]] lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate ) lowercase = {model_input_name: inputs.get(lowercase_ )} lowercase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names lowercase , lowercase = {}, {} for i, label in enumerate(lowercase_ ): lowercase = str(lowercase_ ) lowercase = label # Load the accuracy metric from the datasets package lowercase = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(lowercase_ : Tuple ): lowercase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids ) lowercase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: lowercase = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ ) if training_args.do_eval: if data_args.max_eval_samples is not None: lowercase = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ ) # Initialize our trainer lowercase = Trainer( model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) # Training if training_args.do_train: lowercase = None if training_args.resume_from_checkpoint is not None: lowercase = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase = last_checkpoint lowercase = trainer.train(resume_from_checkpoint=lowercase_ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase_ ) trainer.save_metrics("""eval""" , lowercase_ ) # Write model card and (optionally) push to hub lowercase = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase_ ) else: trainer.create_model_card(**lowercase_ ) if __name__ == "__main__": main()
653
0
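The training transform in the audio-classification row above hinges on the `random_subsample` helper defined at the top of that script. It can be exercised standalone; only numpy is required, and the sample rate, clip length and array sizes below are illustrative.

# Standalone sketch of the random cropping used by the training transform above.
import numpy as np
from random import randint


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    """Randomly crop `wav` to at most `max_length` seconds, as the script does per example."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav  # clip already short enough, keep it whole
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


clip = random_subsample(np.zeros(80_000), max_length=2.0)  # 5 s of "audio" at 16 kHz
assert len(clip) == 32_000  # cropped to exactly 2 s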